Compare commits
No commits in common. "main" and "e4e368a832b97be638fd4c1ca11c05ceff14b711" have entirely different histories.
main
...
e4e368a832
19
.gitignore
vendored
19
.gitignore
vendored
@ -1,9 +1,12 @@
|
|||||||
public/
|
# Project dependencies
|
||||||
|
# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
|
||||||
|
.cache
|
||||||
|
node_modules
|
||||||
|
yarn-error.log
|
||||||
|
|
||||||
|
# Build directory
|
||||||
|
/public
|
||||||
|
|
||||||
*.sw*
|
*.sw*
|
||||||
resources/
|
|
||||||
hugo_build.lock
|
.now
|
||||||
.DS_Store
|
|
||||||
.vercel
|
|
||||||
.venv
|
|
||||||
gemini/capsule/log
|
|
||||||
gemini/capsule/notes
|
|
BIN
.nova/Artwork
BIN
.nova/Artwork
Binary file not shown.
Before Width: | Height: | Size: 6.6 KiB |
@ -1,4 +0,0 @@
|
|||||||
{
|
|
||||||
"workspace.art_style" : 1,
|
|
||||||
"workspace.color" : 5
|
|
||||||
}
|
|
5
.prettierrc
Normal file
5
.prettierrc
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
{
|
||||||
|
"semi": false,
|
||||||
|
"singleQuote": true,
|
||||||
|
"trailingComma": "es5"
|
||||||
|
}
|
@ -1,27 +0,0 @@
|
|||||||
branches: main
|
|
||||||
|
|
||||||
pipeline:
|
|
||||||
build:
|
|
||||||
image: hugomods/hugo:latest
|
|
||||||
commands:
|
|
||||||
- hugo -D
|
|
||||||
deploy:
|
|
||||||
image: alpine
|
|
||||||
secrets: [ S3_ACCESS_KEY, S3_SECRET_ACCESS_KEY, BUNNY_KEY ]
|
|
||||||
commands:
|
|
||||||
- apk update
|
|
||||||
- apk add s3cmd curl
|
|
||||||
- s3cmd --configure --access_key=$S3_ACCESS_KEY --secret_key=$S3_SECRET_ACCESS_KEY --host=https://eu-central-1.linodeobjects.com --host-bucket="%(bucket)s.eu-central-1.linodeobjects.com" --dump-config > /root/.s3cfg
|
|
||||||
- s3cmd -c /root/.s3cfg sync --no-mime-magic --guess-mime-type public/* s3://wilw.dev
|
|
||||||
- 'curl -X POST -H "AccessKey: $BUNNY_KEY" https://api.bunny.net/pullzone/907104/purgeCache'
|
|
||||||
deploy_gemini:
|
|
||||||
image: python:3.10
|
|
||||||
secrets: [ CAPSULE_TOWN_KEY ]
|
|
||||||
commands:
|
|
||||||
- cd gemini
|
|
||||||
- pip install python-frontmatter
|
|
||||||
- python process_capsule.py
|
|
||||||
- CAPSULE=$(tar -czf /tmp/c.tar.gz -C capsule . && cat /tmp/c.tar.gz | base64)
|
|
||||||
- 'echo "{\"capsuleArchive\": \"$CAPSULE\"}" > /tmp/capsule_file'
|
|
||||||
- 'curl -X PUT -H "Content-Type: application/json" -H "api-key: $CAPSULE_TOWN_KEY" -d @/tmp/capsule_file https://api.capsule.town/capsule'
|
|
||||||
|
|
39
Taskfile.yml
39
Taskfile.yml
@ -1,39 +0,0 @@
|
|||||||
version: '3'
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
default:
|
|
||||||
desc: Run Hugo server
|
|
||||||
deps:
|
|
||||||
- hugo server
|
|
||||||
|
|
||||||
deploy:
|
|
||||||
desc: Full deployment
|
|
||||||
deps:
|
|
||||||
- deploy-web
|
|
||||||
- deploy-gemini
|
|
||||||
|
|
||||||
deploy-web:
|
|
||||||
desc: Deploy website
|
|
||||||
cmds:
|
|
||||||
- hugo
|
|
||||||
- aws --profile personal s3 sync public s3://wilw.dev
|
|
||||||
- 'curl -X POST -H "AccessKey: $BUNNY_PERSONAL" https://api.bunny.net/pullzone/907104/purgeCache'
|
|
||||||
|
|
||||||
deploy-gemini:
|
|
||||||
desc: Deploy Gemini capsule
|
|
||||||
dir: 'gemini'
|
|
||||||
deps:
|
|
||||||
- install-gemini-deps
|
|
||||||
cmds:
|
|
||||||
- bash -c "source .venv/bin/activate && python process_capsule.py"
|
|
||||||
- 'export CAPSULE=$(tar -czf /tmp/c.tar.gz -C capsule . && cat /tmp/c.tar.gz | base64) && echo "{\"capsuleArchive\": \"$CAPSULE\"}" > /tmp/capsule_file'
|
|
||||||
- 'curl -X PUT -H "Content-Type: application/json" -H "api-key: $CAPSULE_TOWN_KEY" -d @/tmp/capsule_file https://api.capsule.town/capsule'
|
|
||||||
|
|
||||||
install-gemini-deps:
|
|
||||||
desc: Install Python dependencies for Gemini
|
|
||||||
dir: 'gemini'
|
|
||||||
cmds:
|
|
||||||
- cmd: python3.12 -m venv .venv
|
|
||||||
ignore_error: true
|
|
||||||
- cmd: bash -c "source .venv/bin/activate && pip install python-frontmatter"
|
|
||||||
ignore_error: true
|
|
1
acnh1.png
Normal file
1
acnh1.png
Normal file
@ -0,0 +1 @@
|
|||||||
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html><head><meta http-equiv="refresh" content="0;url=http://advancedsearch2.virginmedia.com/main?ParticipantID=jqlc435patgs4w79dx7g33u8otdryt35&FailedURI=http%3A%2F%2Fstatic%2Fmedia%2Fblog%2Facnh1.png&FailureMode=1&Implementation=&AddInType=4&Version=pywr1.0&ClientLocation=uk"/><script type="text/javascript">url="http://advancedsearch2.virginmedia.com/main?ParticipantID=jqlc435patgs4w79dx7g33u8otdryt35&FailedURI=http%3A%2F%2Fstatic%2Fmedia%2Fblog%2Facnh1.png&FailureMode=1&Implementation=&AddInType=4&Version=pywr1.0&ClientLocation=uk";if(top.location!=location){var w=window,d=document,e=d.documentElement,b=d.body,x=w.innerWidth||e.clientWidth||b.clientWidth,y=w.innerHeight||e.clientHeight||b.clientHeight;url+="&w="+x+"&h="+y;}window.location.replace(url);</script></head><body></body></html>
|
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
title: "{{ replace .Name "-" " " | title }}"
|
|
||||||
date: {{ .Date }}
|
|
||||||
draft: true
|
|
||||||
---
|
|
||||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 6.6 KiB |
@ -1,86 +0,0 @@
|
|||||||
/* Background */ .bg { color:#f8f8f2;background-color:#272822; }
|
|
||||||
/* PreWrapper */ .chroma { color:#f8f8f2;background-color:#272822; }
|
|
||||||
/* Other */ .chroma .x { }
|
|
||||||
/* Error */ .chroma .err { color:#960050;background-color:#1e0010 }
|
|
||||||
/* CodeLine */ .chroma .cl { }
|
|
||||||
/* LineLink */ .chroma .lnlinks { outline:none;text-decoration:none;color:inherit }
|
|
||||||
/* LineTableTD */ .chroma .lntd { vertical-align:top;padding:0;margin:0;border:0; }
|
|
||||||
/* LineTable */ .chroma .lntable { border-spacing:0;padding:0;margin:0;border:0; }
|
|
||||||
/* LineHighlight */ .chroma .hl { background-color:#3c3d38 }
|
|
||||||
/* LineNumbersTable */ .chroma .lnt { white-space:pre;-webkit-user-select:none;user-select:none;margin-right:0.4em;padding:0 0.4em 0 0.4em;color:#7f7f7f }
|
|
||||||
/* LineNumbers */ .chroma .ln { white-space:pre;-webkit-user-select:none;user-select:none;margin-right:0.4em;padding:0 0.4em 0 0.4em;color:#7f7f7f }
|
|
||||||
/* Line */ .chroma .line { display:flex; }
|
|
||||||
/* Keyword */ .chroma .k { color:#66d9ef }
|
|
||||||
/* KeywordConstant */ .chroma .kc { color:#66d9ef }
|
|
||||||
/* KeywordDeclaration */ .chroma .kd { color:#66d9ef }
|
|
||||||
/* KeywordNamespace */ .chroma .kn { color:#f92672 }
|
|
||||||
/* KeywordPseudo */ .chroma .kp { color:#66d9ef }
|
|
||||||
/* KeywordReserved */ .chroma .kr { color:#66d9ef }
|
|
||||||
/* KeywordType */ .chroma .kt { color:#66d9ef }
|
|
||||||
/* Name */ .chroma .n { }
|
|
||||||
/* NameAttribute */ .chroma .na { color:#a6e22e }
|
|
||||||
/* NameBuiltin */ .chroma .nb { }
|
|
||||||
/* NameBuiltinPseudo */ .chroma .bp { }
|
|
||||||
/* NameClass */ .chroma .nc { color:#a6e22e }
|
|
||||||
/* NameConstant */ .chroma .no { color:#66d9ef }
|
|
||||||
/* NameDecorator */ .chroma .nd { color:#a6e22e }
|
|
||||||
/* NameEntity */ .chroma .ni { }
|
|
||||||
/* NameException */ .chroma .ne { color:#a6e22e }
|
|
||||||
/* NameFunction */ .chroma .nf { color:#a6e22e }
|
|
||||||
/* NameFunctionMagic */ .chroma .fm { }
|
|
||||||
/* NameLabel */ .chroma .nl { }
|
|
||||||
/* NameNamespace */ .chroma .nn { }
|
|
||||||
/* NameOther */ .chroma .nx { color:#a6e22e }
|
|
||||||
/* NameProperty */ .chroma .py { }
|
|
||||||
/* NameTag */ .chroma .nt { color:#f92672 }
|
|
||||||
/* NameVariable */ .chroma .nv { }
|
|
||||||
/* NameVariableClass */ .chroma .vc { }
|
|
||||||
/* NameVariableGlobal */ .chroma .vg { }
|
|
||||||
/* NameVariableInstance */ .chroma .vi { }
|
|
||||||
/* NameVariableMagic */ .chroma .vm { }
|
|
||||||
/* Literal */ .chroma .l { color:#ae81ff }
|
|
||||||
/* LiteralDate */ .chroma .ld { color:#e6db74 }
|
|
||||||
/* LiteralString */ .chroma .s { color:#e6db74 }
|
|
||||||
/* LiteralStringAffix */ .chroma .sa { color:#e6db74 }
|
|
||||||
/* LiteralStringBacktick */ .chroma .sb { color:#e6db74 }
|
|
||||||
/* LiteralStringChar */ .chroma .sc { color:#e6db74 }
|
|
||||||
/* LiteralStringDelimiter */ .chroma .dl { color:#e6db74 }
|
|
||||||
/* LiteralStringDoc */ .chroma .sd { color:#e6db74 }
|
|
||||||
/* LiteralStringDouble */ .chroma .s2 { color:#e6db74 }
|
|
||||||
/* LiteralStringEscape */ .chroma .se { color:#ae81ff }
|
|
||||||
/* LiteralStringHeredoc */ .chroma .sh { color:#e6db74 }
|
|
||||||
/* LiteralStringInterpol */ .chroma .si { color:#e6db74 }
|
|
||||||
/* LiteralStringOther */ .chroma .sx { color:#e6db74 }
|
|
||||||
/* LiteralStringRegex */ .chroma .sr { color:#e6db74 }
|
|
||||||
/* LiteralStringSingle */ .chroma .s1 { color:#e6db74 }
|
|
||||||
/* LiteralStringSymbol */ .chroma .ss { color:#e6db74 }
|
|
||||||
/* LiteralNumber */ .chroma .m { color:#ae81ff }
|
|
||||||
/* LiteralNumberBin */ .chroma .mb { color:#ae81ff }
|
|
||||||
/* LiteralNumberFloat */ .chroma .mf { color:#ae81ff }
|
|
||||||
/* LiteralNumberHex */ .chroma .mh { color:#ae81ff }
|
|
||||||
/* LiteralNumberInteger */ .chroma .mi { color:#ae81ff }
|
|
||||||
/* LiteralNumberIntegerLong */ .chroma .il { color:#ae81ff }
|
|
||||||
/* LiteralNumberOct */ .chroma .mo { color:#ae81ff }
|
|
||||||
/* Operator */ .chroma .o { color:#f92672 }
|
|
||||||
/* OperatorWord */ .chroma .ow { color:#f92672 }
|
|
||||||
/* Punctuation */ .chroma .p { }
|
|
||||||
/* Comment */ .chroma .c { color:#75715e }
|
|
||||||
/* CommentHashbang */ .chroma .ch { color:#75715e }
|
|
||||||
/* CommentMultiline */ .chroma .cm { color:#75715e }
|
|
||||||
/* CommentSingle */ .chroma .c1 { color:#75715e }
|
|
||||||
/* CommentSpecial */ .chroma .cs { color:#75715e }
|
|
||||||
/* CommentPreproc */ .chroma .cp { color:#75715e }
|
|
||||||
/* CommentPreprocFile */ .chroma .cpf { color:#75715e }
|
|
||||||
/* Generic */ .chroma .g { }
|
|
||||||
/* GenericDeleted */ .chroma .gd { color:#f92672 }
|
|
||||||
/* GenericEmph */ .chroma .ge { font-style:italic }
|
|
||||||
/* GenericError */ .chroma .gr { }
|
|
||||||
/* GenericHeading */ .chroma .gh { }
|
|
||||||
/* GenericInserted */ .chroma .gi { color:#a6e22e }
|
|
||||||
/* GenericOutput */ .chroma .go { }
|
|
||||||
/* GenericPrompt */ .chroma .gp { }
|
|
||||||
/* GenericStrong */ .chroma .gs { font-weight:bold }
|
|
||||||
/* GenericSubheading */ .chroma .gu { color:#75715e }
|
|
||||||
/* GenericTraceback */ .chroma .gt { }
|
|
||||||
/* GenericUnderline */ .chroma .gl { }
|
|
||||||
/* TextWhitespace */ .chroma .w { }
|
|
448
assets/main.scss
448
assets/main.scss
@ -1,448 +0,0 @@
|
|||||||
body {
|
|
||||||
font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
|
|
||||||
margin: 0px;
|
|
||||||
min-height: 100vh;
|
|
||||||
display: flex;
|
|
||||||
flex-direction: column;
|
|
||||||
}
|
|
||||||
|
|
||||||
header {
|
|
||||||
max-width:960px;
|
|
||||||
margin: 10px auto;
|
|
||||||
width: 100%;
|
|
||||||
.main{
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
margin-bottom: 10px;
|
|
||||||
padding: 0px 10px 0px 10px;
|
|
||||||
a.avatar {
|
|
||||||
display: inline-block;
|
|
||||||
margin-right: 10px;
|
|
||||||
img {
|
|
||||||
height: 50px;
|
|
||||||
width: 50px;
|
|
||||||
border-radius: 50%;
|
|
||||||
transition: opacity 0.2s;
|
|
||||||
&:hover{
|
|
||||||
opacity: 0.8;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.details{
|
|
||||||
flex: 1;
|
|
||||||
a.title{
|
|
||||||
display: inline-block;
|
|
||||||
font-size: 23px;
|
|
||||||
margin-bottom: 5px;
|
|
||||||
text-decoration: none;
|
|
||||||
font-weight: bold;
|
|
||||||
color: initial;
|
|
||||||
}
|
|
||||||
.socials{
|
|
||||||
a{
|
|
||||||
text-decoration: none;
|
|
||||||
margin-right: 5px;
|
|
||||||
margin-bottom: 5px;
|
|
||||||
padding: 4px;
|
|
||||||
border-radius: 5px;
|
|
||||||
background: linen;
|
|
||||||
color: rgba(0,0,0,0.75);
|
|
||||||
display: inline-block;
|
|
||||||
transition: background 0.2s;
|
|
||||||
&:hover{
|
|
||||||
background: lightskyblue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
nav{
|
|
||||||
padding: 0px 10px 0px 10px;
|
|
||||||
display: flex;
|
|
||||||
background: rgb(247,244,244);
|
|
||||||
border-radius: 4px;
|
|
||||||
padding: 0px 10px;
|
|
||||||
a{
|
|
||||||
padding: 10px 10px;
|
|
||||||
margin-right: 5px;
|
|
||||||
display:inline-block;
|
|
||||||
text-transform: uppercase;
|
|
||||||
font-size: 15px;
|
|
||||||
color: black;
|
|
||||||
&.active{
|
|
||||||
text-decoration: none;
|
|
||||||
font-weight: bold;
|
|
||||||
background: rgb(240,237,237);;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
main {
|
|
||||||
width: 100%;
|
|
||||||
max-width:960px;
|
|
||||||
margin: 0px auto;
|
|
||||||
flex: 1;
|
|
||||||
.content-wrapper{
|
|
||||||
padding: 0px 10px 10px;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
footer {
|
|
||||||
border-top: 2px solid linen;
|
|
||||||
padding: 10px 15px 10px 15px;
|
|
||||||
margin-top: 30px;
|
|
||||||
@media (min-width: 900px) {
|
|
||||||
display: flex;
|
|
||||||
justify-content: space-between;
|
|
||||||
align-items: center;
|
|
||||||
}
|
|
||||||
.left {
|
|
||||||
display:flex;
|
|
||||||
align-items: baseline;
|
|
||||||
@media (max-width: 550px) {
|
|
||||||
justify-content: center;
|
|
||||||
display: initial;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
.copyright{
|
|
||||||
font-size: 13px;
|
|
||||||
color: rgb(100,100,100);
|
|
||||||
}
|
|
||||||
a {
|
|
||||||
margin-left: 10px;
|
|
||||||
font-size: 13px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.right{
|
|
||||||
@media (min-width: 550px) {
|
|
||||||
display: flex;
|
|
||||||
justify-content: space-between;
|
|
||||||
align-items: center;
|
|
||||||
}
|
|
||||||
.carbonbadge{
|
|
||||||
@media (min-width: 550px) {
|
|
||||||
margin-left: 10px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.kb-club {
|
|
||||||
font-size: 1em;
|
|
||||||
|
|
||||||
@media (max-width: 550px) {
|
|
||||||
display: block;
|
|
||||||
margin: 10px auto;
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
a {
|
|
||||||
text-decoration: none;
|
|
||||||
color: #212121;
|
|
||||||
padding: .25rem 0;
|
|
||||||
}
|
|
||||||
.kb-club-bg, .kb-club-no-bg {
|
|
||||||
border: 1px solid rgb(2, 90, 84);
|
|
||||||
padding: 3px 6px;
|
|
||||||
}
|
|
||||||
.kb-club-no-bg{
|
|
||||||
border-radius: 4px 0px 0px 4px;
|
|
||||||
}
|
|
||||||
.kb-club-bg {
|
|
||||||
font-weight: bold;
|
|
||||||
background: rgb(2, 90, 84);
|
|
||||||
color: white;
|
|
||||||
border-radius: 0px 4px 4px 0px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
table {
|
|
||||||
width: 100%;
|
|
||||||
thead {
|
|
||||||
background: rgb(230,230,230);
|
|
||||||
th {
|
|
||||||
padding: 5px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tbody{
|
|
||||||
background: rgb(245,245,245);
|
|
||||||
td {
|
|
||||||
padding: 5px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.alert {
|
|
||||||
background-color: lightcyan;
|
|
||||||
border-radius: 5px;
|
|
||||||
max-width: 800px;
|
|
||||||
margin: 20px auto;
|
|
||||||
padding: 10px 10px 10px 20px;
|
|
||||||
border-left: 10px solid rgba(0,0,0,0.2);
|
|
||||||
h3 {
|
|
||||||
margin-top: 0px;
|
|
||||||
}
|
|
||||||
&.green{
|
|
||||||
background-color: #F5F5DC;
|
|
||||||
}
|
|
||||||
&.grey{
|
|
||||||
background-color: rgb(240,240,240);
|
|
||||||
}
|
|
||||||
&.non-centered{
|
|
||||||
margin: 20px 0px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.two-columns{
|
|
||||||
display: grid;
|
|
||||||
grid-column-gap: 20px;
|
|
||||||
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
|
|
||||||
}
|
|
||||||
|
|
||||||
.nav-layout{
|
|
||||||
.nav {
|
|
||||||
min-width: 250px;
|
|
||||||
border-radius: 10px;
|
|
||||||
background: rgb(247,244,244);
|
|
||||||
.menu {
|
|
||||||
a {
|
|
||||||
display: block;
|
|
||||||
margin-bottom: 8px;
|
|
||||||
padding: 3px 5px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@media only screen and (min-width: 501px) {
|
|
||||||
display: flex;
|
|
||||||
position: relative;
|
|
||||||
.nav{
|
|
||||||
position: sticky;
|
|
||||||
top: 10px;
|
|
||||||
margin-right: 15px;
|
|
||||||
max-height: 80vh;
|
|
||||||
overflow-y: scroll;
|
|
||||||
h3 {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
&::after{
|
|
||||||
position: sticky;
|
|
||||||
display: block;
|
|
||||||
padding: 5px 0px;
|
|
||||||
bottom: 0px;
|
|
||||||
left: 0px;
|
|
||||||
width: 100%;
|
|
||||||
text-align: center;
|
|
||||||
box-shadow: 0px 0px 10px rgba(0,0,0,0.2);
|
|
||||||
background: rgb(220,217,217);
|
|
||||||
font-size: 15px;
|
|
||||||
color: rgb(50,50,50);
|
|
||||||
content: '↕️ This menu is scrollable';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.content {
|
|
||||||
flex: 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@media only screen and (max-width: 500px) {
|
|
||||||
.nav {
|
|
||||||
margin-bottom: 10px;
|
|
||||||
margin-right: 0px;
|
|
||||||
h3 {
|
|
||||||
a {
|
|
||||||
text-decoration: none;
|
|
||||||
&:after {
|
|
||||||
content: '⬇️';
|
|
||||||
margin-left: 10px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&:hover, &:active, &:focus {
|
|
||||||
.menu {
|
|
||||||
display: block;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.menu {
|
|
||||||
display: none;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.project, .research-item {
|
|
||||||
margin-top: 30px;
|
|
||||||
display:flex;
|
|
||||||
align-items: start;
|
|
||||||
.logo{
|
|
||||||
padding-top: 15px;
|
|
||||||
margin-right: 20px;
|
|
||||||
img{
|
|
||||||
max-width: 100px;
|
|
||||||
max-height: 100px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.details{
|
|
||||||
flex: 1;
|
|
||||||
h4{
|
|
||||||
margin-top: 0px;
|
|
||||||
margin-bottom: 10px;
|
|
||||||
}
|
|
||||||
.platforms{
|
|
||||||
a {
|
|
||||||
margin-left: 5px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.journal{
|
|
||||||
font-size: small;
|
|
||||||
margin-top: 2px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.blog-page-header{
|
|
||||||
display: flex;
|
|
||||||
justify-content: space-between;
|
|
||||||
align-items: baseline;
|
|
||||||
}
|
|
||||||
|
|
||||||
.blog-summary, .note-summary {
|
|
||||||
background-color: #FFFAF0;
|
|
||||||
border-radius: 5px;
|
|
||||||
padding: 10px 10px 10px 20px;
|
|
||||||
border-left: 10px solid rgba(0,0,0,0.1);
|
|
||||||
margin-bottom: 15px;
|
|
||||||
display: flex;
|
|
||||||
align-items: center;
|
|
||||||
h3{
|
|
||||||
margin-top: 0px;
|
|
||||||
.date{
|
|
||||||
font-size: small;
|
|
||||||
margin-left: 20px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.summary {
|
|
||||||
* {
|
|
||||||
font-size: smaller;
|
|
||||||
}
|
|
||||||
p {
|
|
||||||
margin-bottom: 10px;
|
|
||||||
}
|
|
||||||
img {
|
|
||||||
max-width: 100%;
|
|
||||||
margin: 10px 0px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&::before {
|
|
||||||
margin-right: 20px;
|
|
||||||
content: '📝';
|
|
||||||
font-size: 30px;
|
|
||||||
}
|
|
||||||
&.blog-summary {
|
|
||||||
&::before {
|
|
||||||
content: '📝';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&.note-summary {
|
|
||||||
background-color: #e3f0fc;
|
|
||||||
&::before {
|
|
||||||
content: '📔';
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.blog-post, .note-entry {
|
|
||||||
.details{
|
|
||||||
text-align: center;
|
|
||||||
}
|
|
||||||
.header-image {
|
|
||||||
display: block;
|
|
||||||
max-width: 90%;
|
|
||||||
margin: 20px auto;
|
|
||||||
}
|
|
||||||
.note-details {
|
|
||||||
text-align: left;
|
|
||||||
}
|
|
||||||
.navigation{
|
|
||||||
text-align: center;
|
|
||||||
a {
|
|
||||||
margin-right: 10px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
article{
|
|
||||||
padding-top: 20px;
|
|
||||||
border-top: 1px solid rgb(230,230,230);
|
|
||||||
font-size: large;
|
|
||||||
margin-bottom: 100px;
|
|
||||||
>*:not(.highlight) {
|
|
||||||
display: block;
|
|
||||||
max-width: 500px;
|
|
||||||
margin-left: auto;
|
|
||||||
margin-right: auto;
|
|
||||||
code {
|
|
||||||
background-color: #FDF6E3;
|
|
||||||
border-radius: .3em; color: #657B83;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
img{
|
|
||||||
display: block;
|
|
||||||
margin: 20px auto;
|
|
||||||
max-width: 100%;
|
|
||||||
max-height: 600px;
|
|
||||||
}
|
|
||||||
blockquote{
|
|
||||||
padding: 10px;
|
|
||||||
background-color: rgb(245,245,230);
|
|
||||||
p {
|
|
||||||
margin-top: 0px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pre {
|
|
||||||
white-space: pre-wrap;
|
|
||||||
padding: 5px;
|
|
||||||
box-shadow: inset 0px 0px 10px 0px rgba(0,0,0,0.1);
|
|
||||||
border-radius: 4px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
.tag{
|
|
||||||
display: inline-block;
|
|
||||||
padding: 4px;
|
|
||||||
margin-right: 15px;
|
|
||||||
border-radius: 5px;
|
|
||||||
background-color: azure;
|
|
||||||
color:rgba(0,0,0,0.6);
|
|
||||||
text-decoration: none;
|
|
||||||
&::before {
|
|
||||||
content: '🏷️';
|
|
||||||
margin-right: 5px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
main a, nav a {
|
|
||||||
background: rgb(247,244,244);
|
|
||||||
color: black;
|
|
||||||
padding: 3px;
|
|
||||||
display: inline-block;
|
|
||||||
border-radius: 3px;
|
|
||||||
transition: background 0.2s;
|
|
||||||
&:hover{
|
|
||||||
background: rgb(220,217,217);
|
|
||||||
}
|
|
||||||
&.active{
|
|
||||||
font-weight: bold;
|
|
||||||
text-decoration: none;
|
|
||||||
background: rgb(240,237,237);
|
|
||||||
}
|
|
||||||
// Below courtesy of Christian Oliff (https://christianoliff.com/blog/styling-external-links-with-an-icon-in-css)
|
|
||||||
&[href^="http"]::after,&[href^="https://"]::after {
|
|
||||||
content: "";
|
|
||||||
width: 11px;
|
|
||||||
height: 11px;
|
|
||||||
margin-left: 4px;
|
|
||||||
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16' fill='currentColor' viewBox='0 0 16 16'%3E%3Cpath fill-rule='evenodd' d='M8.636 3.5a.5.5 0 0 0-.5-.5H1.5A1.5 1.5 0 0 0 0 4.5v10A1.5 1.5 0 0 0 1.5 16h10a1.5 1.5 0 0 0 1.5-1.5V7.864a.5.5 0 0 0-1 0V14.5a.5.5 0 0 1-.5.5h-10a.5.5 0 0 1-.5-.5v-10a.5.5 0 0 1 .5-.5h6.636a.5.5 0 0 0 .5-.5z'/%3E%3Cpath fill-rule='evenodd' d='M16 .5a.5.5 0 0 0-.5-.5h-5a.5.5 0 0 0 0 1h3.793L6.146 9.146a.5.5 0 1 0 .708.708L15 1.707V5.5a.5.5 0 0 0 1 0v-5z'/%3E%3C/svg%3E");
|
|
||||||
background-position: center;
|
|
||||||
background-repeat: no-repeat;
|
|
||||||
background-size: contain;
|
|
||||||
display: inline-block;
|
|
||||||
}
|
|
||||||
}
|
|
BIN
assets/will.jpg
BIN
assets/will.jpg
Binary file not shown.
Before Width: | Height: | Size: 15 KiB |
@ -1,18 +0,0 @@
|
|||||||
{
|
|
||||||
"Statement": [
|
|
||||||
{
|
|
||||||
"Effect": "Allow",
|
|
||||||
"Principal": {
|
|
||||||
"AWS": [
|
|
||||||
"*"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"Action": [
|
|
||||||
"s3:GetObject"
|
|
||||||
],
|
|
||||||
"Resource": [
|
|
||||||
"arn:aws:s3:::wilw.dev/*"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
49
config.toml
49
config.toml
@ -1,49 +0,0 @@
|
|||||||
baseURL = 'https://wilw.dev'
|
|
||||||
languageCode = 'en-gb'
|
|
||||||
title = 'Will Webberley'
|
|
||||||
|
|
||||||
[params]
|
|
||||||
author = 'Will Webberley'
|
|
||||||
description = "Will Webberley's personal website."
|
|
||||||
|
|
||||||
[permalinks]
|
|
||||||
blog = '/blog/:year/:month/:day/:slug/'
|
|
||||||
|
|
||||||
[menu]
|
|
||||||
[[menu.main]]
|
|
||||||
name = 'About'
|
|
||||||
url = '/'
|
|
||||||
pageRef = '/'
|
|
||||||
[[menu.main]]
|
|
||||||
name = 'Blog'
|
|
||||||
url = '/blog/'
|
|
||||||
pageRef = 'blog'
|
|
||||||
[[menu.main]]
|
|
||||||
name = '🌱 Notes'
|
|
||||||
url = '/notes/'
|
|
||||||
pageRef = 'notes'
|
|
||||||
[[menu.main]]
|
|
||||||
name = 'Projects'
|
|
||||||
url = '/projects/'
|
|
||||||
pageRef = 'projects'
|
|
||||||
|
|
||||||
[outputFormats]
|
|
||||||
[outputFormats.RSS]
|
|
||||||
mediatype = "application/rss"
|
|
||||||
baseName = "rss"
|
|
||||||
|
|
||||||
[markup]
|
|
||||||
[markup.highlight]
|
|
||||||
anchorLineNos = true
|
|
||||||
codeFences = true
|
|
||||||
guessSyntax = false
|
|
||||||
hl_Lines = ''
|
|
||||||
hl_inline = false
|
|
||||||
lineAnchors = ''
|
|
||||||
lineNoStart = 1
|
|
||||||
lineNos = false
|
|
||||||
lineNumbersInTable = true
|
|
||||||
noClasses = false
|
|
||||||
noHl = false
|
|
||||||
tabWidth = 2
|
|
||||||
pygmentsUseClasses = true
|
|
@ -1,39 +0,0 @@
|
|||||||
---
|
|
||||||
title: Home
|
|
||||||
description: Will Webberley's Personal Website
|
|
||||||
---
|
|
||||||
|
|
||||||
**Hello and welcome.** I'm a technology lead & enthusiast in Wales. I enjoy 🏋️♂️ fitness, I love to ✈️ travel, and I'm a 🐶 proud dog dad!
|
|
||||||
|
|
||||||
I'm into startups & small businesses, indie or open-source tech projects, and [self-hosting](/tags/selfhost).
|
|
||||||
|
|
||||||
## Background
|
|
||||||
|
|
||||||
💡 Since 2016 I have been Chief Technology Officer at enterprise SaaS company [Simply Do Ideas](https://www.simplydo.co.uk). Before this I was a software engineer at [Chaser](https://www.chaserhq.com).
|
|
||||||
|
|
||||||
📦 I build and maintain a number of projects - both [open-source](/projects) and commercial. I am a co-founder of [Trialflare](https://www.trialflare.com).
|
|
||||||
|
|
||||||
🎓 I completed [my PhD](/research#phd) at [Cardiff University](https://cardiff.ac.uk)'s [School of Computer Science & Informatics](https://www.cardiff.ac.uk/computer-science) in 2015.
|
|
||||||
|
|
||||||
🤓 I worked on the IBM-led UK MoD and US Army Research Labs coalition [ITA project](https://en.wikipedia.org/wiki/NIS-ITA) as a postdoctoral research associate.
|
|
||||||
|
|
||||||
👨🏫 I lectured the Advanced Computer Science MSc module Web & Social Computing
|
|
||||||
and the Computer Science BSc module Human-Computer Interaction
|
|
||||||
|
|
||||||
## What's on this website?
|
|
||||||
|
|
||||||
📝 I write about technology and things I find interesting on [my blog](/blog) *([📥 RSS feeds](/feeds) available).*
|
|
||||||
|
|
||||||
🌱 I curate a collection of [thoughts, links and notes](/notes).
|
|
||||||
|
|
||||||
🚀 I (occasionally) publish additional content on my geminispace at [gemini://wilw.capsule.town](gemini://wilw.capsule.town) *(see [this post](/blog/2021/01/20/project-gemini) for help with opening this link).*
|
|
||||||
|
|
||||||
👨💻 Some of my research publications are [available here](/research).
|
|
||||||
|
|
||||||
🪴 [Find out more](/this) about this website and its purpose.
|
|
||||||
|
|
||||||
## How to contact me
|
|
||||||
|
|
||||||
You can follow me on Mastodon (📣 [@wilw@fosstodon.org](https://fosstodon.org/@wilw)) and Pixelfed (🖼️ [@wilw@pixelfed.social](https://pixelfed.social/@wilw)).
|
|
||||||
|
|
||||||
You can also get in touch directly with me on Telegram ([@wilw88](https://t.me/wilw88)).
|
|
@ -1,32 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2012-09-20T09:15:00Z"
|
|
||||||
title: DigiSocial Hackathon
|
|
||||||
description: "My attendance at the DigiSocial Hackathon"
|
|
||||||
tags: [event, cardiffuniversity]
|
|
||||||
---
|
|
||||||
|
|
||||||
We recently held our DigiSocial Hackathon. This was a collaboration between the Schools of
|
|
||||||
Computer Science and Social Sciences and was organised by myself and a few others.
|
|
||||||
|
|
||||||
The website for the event is hosted [here](http://users.cs.cf.ac.uk/W.M.Webberley/digisocial/).
|
|
||||||
|
|
||||||
![DigiSocial logo](/media/blog/digisocial_logo.png)
|
|
||||||
|
|
||||||
The idea of the event was to try and encourage further ties between the different Schools of the University. The
|
|
||||||
University Graduate College (UGC) provide the funding for these events, which must be applied for, in the hope
|
|
||||||
that good projects or results come out of it.
|
|
||||||
|
|
||||||
We had relatively good responses from the Schools of Maths, Social Sciences, Medicine, and ourselves, and had a turnout of around 10-15
|
|
||||||
for the event on the 15th and 16th September. Initially, we started to develop ideas for potential projects. Because of the
|
|
||||||
nature of the event, we wanted to make sure they were as cross-disciplined as possible. A hackday, in itself, is pretty
|
|
||||||
computer science-y so we needed to apply a social or medical spin on our ideas.
|
|
||||||
|
|
||||||
Eventually, we settled into two groups: one working on a social-themed project based on crimes in an area (both in terms of
|
|
||||||
distribution and intensity) in relation to the food hygiene levels in nearby establishments; another focusing on hospital wait times
|
|
||||||
and free beds in South Wales. Effectively, then, both projects are visualisations of publicly-available datasets.
|
|
||||||
|
|
||||||
I worked on the social project with Matt Williams, Wil Chivers and Martin Chorley, and it is viewable [here](http://ukcrimemashup.nomovingparts.net/).
|
|
||||||
|
|
||||||
Overall the event was a reasonable success; two projects were
|
|
||||||
completed and we have now made links with the other Schools which will hopefully allow us to do similar events together in the
|
|
||||||
future.
|
|
@ -1,11 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-01-21T17:00:00Z"
|
|
||||||
title: Research Poster Day
|
|
||||||
description: "Attending the research poster day"
|
|
||||||
tags: [research, cardiffuniversity]
|
|
||||||
---
|
|
||||||
|
|
||||||
Each January the School of Computer Science hosts a poster day in order for the research students to demonstrate their current work to
|
|
||||||
other research students, research staff and undergraduates. The event lets members of the department see what other research is being done outside of their own group and gives researchers an opportunity to defend their research ideas.
|
|
||||||
|
|
||||||
This year, I focused on my current research area, which is to do with inferring how interesting a Tweet is based on a comparison between simulated retweet patterns and the propagation behaviour demonstrated by the Tweet in Twitter itself. The poster highlights recent work in the build-up to this, a general overview of how the research works, and finishes with where I want to take this research in the future.
|
|
@ -1,24 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-02-18T17:00:00Z"
|
|
||||||
title: ScriptSlide
|
|
||||||
description: "Small JS library: scriptslide"
|
|
||||||
tags: [javascript, project, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
I've taken to writing most of my recent presentations in plain HTML (rather than using third-party software or services). I used
|
|
||||||
JavaScript to handle the appearance and ordering of slides.
|
|
||||||
|
|
||||||
I bundled the JS into a single script, `js/scriptslide.js` which can be configured
|
|
||||||
using the `js/config.js` script.
|
|
||||||
|
|
||||||
There is a [GitHub repo](https://github.com/willwebberley/ScriptSlide) for the code, along with example usage and instructions.
|
|
||||||
|
|
||||||
Most configuration can be done by using the `js/config.js` script, which supports many features including:
|
|
||||||
|
|
||||||
- Set the slide transition type (appear, fade, slide)
|
|
||||||
- Set the logos, page title, etc.
|
|
||||||
- Configure the colour scheme
|
|
||||||
|
|
||||||
Then simply create an HTML document, set some other styles (there is a template in `css/styles.css`), and
|
|
||||||
put each slide inside `<section>...</section>` tags. The slide menu is then generated autmatically
|
|
||||||
when the page is loaded.
|
|
@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-03-07T17:00:00Z"
|
|
||||||
title: Gower Tides App Released
|
|
||||||
description: "Announcing the release of my Gower Tides Android app"
|
|
||||||
tags: [android, project, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
A [few posts back](/blog/2012/11/13-delving-into-android), I talked
|
|
||||||
about the development of an Android app for tide predictions for South Wales. This app is now on [Google Play](https://play.google.com/store/apps/details?id=net.willwebberley.gowertides).
|
|
||||||
|
|
||||||
If you live in South Wales and are vaguely interested in tides/weather, then you should probably download it :)
|
|
||||||
|
|
||||||
The main advantage is that the app does not need any data connection to display the tidal data, which is useful in areas
|
|
||||||
with low signal. In future, I hope to add further features, such as a more accurate tide graph (using a proper 'wave'),
|
|
||||||
surf reports, and just general UI updates.
|
|
@ -1,10 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-03-30T17:00:00Z"
|
|
||||||
title: Decking Building
|
|
||||||
description: "Building a deck for the garden"
|
|
||||||
tags: [life]
|
|
||||||
---
|
|
||||||
|
|
||||||
![A new decking](/media/blog/decking.png)
|
|
||||||
|
|
||||||
I managed to turn about two tonnes of material into something vaguely resembling 'decking' in my back garden this weekend. It makes the area look much nicer, but whether it actually stays up is a completely different matter.
|
|
@ -1,46 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-04-05T17:00:00Z"
|
|
||||||
title: "AJAX + Python + Amazon S3"
|
|
||||||
description: "Direct to S3 uploads using Python and AWS S3"
|
|
||||||
tags: [python, aws, s3, technology]
|
|
||||||
slug: ajax-+-python-+-amazon-s3
|
|
||||||
---
|
|
||||||
|
|
||||||
I wanted a way in which users can seamlessly upload images for use in the Heroku application discussed in previous posts.
|
|
||||||
|
|
||||||
Ideally, the image would be uploaded through AJAX as part of a data-entry form, but without having to refresh the page or anything else that would disrupt the user's experience. As far as I know, barebones JQuery does not support AJAX uploads, but [this handy plugin](http://www.malsup.com/jquery/form/#file-upload) does.
|
|
||||||
|
|
||||||
### Handling the upload (AJAX)
|
|
||||||
|
|
||||||
I styled the file input nicely (in a similar way to [this guy](http://ericbidelman.tumblr.com/post/14636214755/making-file-inputs-a-pleasure-to-look-at)) and added the JS so that the upload is sent properly (and to the appropriate URL) when a change is detected to the input (i.e. the user does not need to click the 'upload' button to start the upload).
|
|
||||||
|
|
||||||
### Receiving the upload (Python)
|
|
||||||
|
|
||||||
The backend, as previously mentioned, is written in Python as part of a Flask app. Since Heroku's customer webspace is read-only, uploads would have to be stored elsewhere. [Boto](http://boto.s3.amazonaws.com/index.html)'s a cool library for interfacing with various AWS products (including S3) and can easily be installed with `pip install boto`. From this library, we're going to need the `S3Connection` and `Key` classes:
|
|
||||||
|
|
||||||
```
|
|
||||||
from boto.s3.connection import S3Connection
|
|
||||||
from boto.s3.key import Key
|
|
||||||
```
|
|
||||||
|
|
||||||
Now we can easily handle the transfer using the `request` object exposed to Flask's routing methods:
|
|
||||||
|
|
||||||
```
|
|
||||||
file = request.files['file_input_name']
|
|
||||||
con = S3Connection(<'AWS_KEY'>, <'AWS_SECRET'>)
|
|
||||||
key = Key(con.get_bucket(<'BUCKET_NAME'>))
|
|
||||||
key.set_contents_from_file(file)
|
|
||||||
```
|
|
||||||
|
|
||||||
Go to the next step for the AWS details and the bucket name. Depending on where you chose your AWS location as (e.g. US, Europe, etc.), then your file will be accessible as something like `https://s3-eu-west-1.amazonaws.com/<BUCKET_NAME>/<FILENAME>`. If you want, you can also set, among other things, stuff like the file's mime type and access type:
|
|
||||||
|
|
||||||
```
|
|
||||||
key.set_metadata('Content-Type', 'image/png')
|
|
||||||
key.set_acl('public-read')
|
|
||||||
```
|
|
||||||
|
|
||||||
### Setting up the bucket (Amazon S3)
|
|
||||||
|
|
||||||
Finally you'll need to create the bucket. Create or log into your AWS account, go to the AWS console, choose your region (if you're in Europe, then the Ireland one is probably the best choice) and enter the S3 section. Here, create a bucket (the name needs to be globally unique). Now, go to your account settings page to find your AWS access key and secret and plug these, along with the bucket name, into the appropriate places in your Python file.
|
|
||||||
|
|
||||||
And that's it. For large files, this may tie up your Heroku dynos a bit while they carry out the upload, so this technique is best for smaller files (especially if you're only using the one web dyno). My example of a working implementation of this is available [in this file](https://github.com/willwebberley/niteowl-web/blob/master/api.py).
|
|
@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-04-11T17:00:00Z"
|
|
||||||
title: Cardiff Open Sauce Hackathon
|
|
||||||
description: "Attending the Cardiff Open Sauce Hackathon"
|
|
||||||
tags: [event, cardiffuniversity]
|
|
||||||
---
|
|
||||||
|
|
||||||
Next week I, along with others in a team, am taking part in [Cardiff Open Sauce Hackathon](http://www.cs.cf.ac.uk/hackathon/).
|
|
||||||
|
|
||||||
If you're in the area and feel like joining in for the weekend then sign up at the link above.
|
|
||||||
|
|
||||||
The hackathon is a two-day event in which teams work to 'hack together' smallish projects, which will be open-sourced at the end of the weekend. Whilst we have a few ideas already for potential projects, if anyone has any cool ideas for something relatively quick, but useful, to make, then please let me know!
|
|
@ -1,30 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-04-16T17:00:00Z"
|
|
||||||
title: Trials of Eduroam
|
|
||||||
description: "Connecting to Eduroam using Arch Linux"
|
|
||||||
tags: [linux, wifi, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
I've been having trouble connecting to Eduroam, at least reliably and persistently, without heavy desktop environments or complicated network managers. Eduroam is the wireless networking service used by many Universities in Europe, and whilst it would probably work fine using the tools provided by heavier DEs, I wanted something that could just run quickly and independently.
|
|
||||||
|
|
||||||
Many approaches require the editing of loads of config files (especially true for `netcfg`), which would need altering again after things like password changes. The approach I used (for Arch Linux) is actually really simple and involves the use of the user-contributed `wicd-eduroam` package available in the [Arch User Repository](https://aur.archlinux.org/packages/wicd-eduroam/).
|
|
||||||
|
|
||||||
Obviously, `wicd-eduroam` is related to, and depends on, `wicd`, a handy network connection manager, so install that first:
|
|
||||||
|
|
||||||
```
|
|
||||||
# pacman -S wicd
|
|
||||||
$ yaourt -S wicd-eduroam
|
|
||||||
```
|
|
||||||
|
|
||||||
(If you don't use `yaourt` download the [tarball](https://aur.archlinux.org/packages/wi/wicd-eduroam/wicd-eduroam.tar.gz) and build it using the `makepkg` method.)
|
|
||||||
|
|
||||||
`wicd` can conflict with other network managers, so stop and disable them before starting and enabling `wicd`. This will allow it to startup at boot time. e.g.:
|
|
||||||
|
|
||||||
```
|
|
||||||
# systemctl stop NetworkManager
|
|
||||||
# systemctl disable NetworkManager
|
|
||||||
# systemctl start wicd
|
|
||||||
# systemctl enable wicd
|
|
||||||
```
|
|
||||||
|
|
||||||
Now start `wicd-client` (or set it to autostart), let it scan for networks, and edit the properties of the network `eduroam`. Set the encryption type as `eduroam` in the list, enter the username and password, click OK and then allow it to connect.
|
|
@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-04-23T17:00:00Z"
|
|
||||||
title: flyingsparx.net On Digital Ocean
|
|
||||||
description: "Deploying my personal website to Digital Ocean"
|
|
||||||
tags: [digitalocean, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
My hosting for [my website](http://www.willwebberley.net) has nearly expired, so I have been looking for renewal options.
|
|
||||||
|
|
||||||
These days I tend to need to use servers for more than simple web-hosting, and most do not provide the flexibility that a VPS would. Having (mostly) full control over a properly-maintained virtual cloud server is so much more convenient, and allows you to do tonnes of stuff beyond simple web hosting.
|
|
||||||
|
|
||||||
I have some applications deployed on [Heroku](https://www.heroku.com), which is definitely useful and easy for this purpose, but I decided to complement this for my needs by buying a 'droplet' from [Digital Ocean](https://www.digitalocean.com).
|
|
||||||
|
|
||||||
Droplets are DO's term for a server instance, and are super quick to set up (55 seconds from first landing at their site to a booted virtual server, they claim) and very reasonably priced. I started an Arch instance, quickly set up nginx, Python and uwsgi, and started this blog and site as a Python app running on the Flask microframework.
|
|
||||||
|
|
||||||
So far, I've had no issues, and everything seems to work quickly and smoothly. If all goes to plan, over the next few months I'll migrate some more stuff over, including the backend for the Gower Tides app.
|
|
@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-04-25T17:00:00Z"
|
|
||||||
title: eartub.es
|
|
||||||
description: "Working on eartub.es at the Cardiff Open Sauce Hackathon"
|
|
||||||
tags: [event, project, cardiffuniversity]
|
|
||||||
---
|
|
||||||
|
|
||||||
Last weekend I went to [CFHack Open Sauce Hackathon](http://www.cs.cf.ac.uk/hackathon). I worked in a team with [Chris](http://christopher-gwilliams.com), [Ross](https://twitter.com/OnyxNoir) and [Matt](http://users.cs.cf.ac.uk/M.P.John/).
|
|
||||||
|
|
||||||
We started work on [eartub.es](http://eartub.es), which is a web application for suggesting movies based on their sound tracks. We had several ideas for requirements we wanted to meet but, due to the nature of hackathons, we didn't do nearly as much as what we thought we would!
|
|
||||||
|
|
||||||
For now, eartubes allows you to search for a movie (from a 2.5 million movie database) and view other movies with similar soundtracks. This is currently based on cross matching the composer between movies, but more in-depth functionality is still in the works. We have nearly completed Last.fm integration, which would allow the app to suggest movies from your favourite and most listened-to music, and are working towards genre-matching and other, more complex, learning techniques. The registration functionality is disabled while we add this extra stuff.
|
|
||||||
|
|
||||||
The backend is written in Python and runs as a Flask application. Contrary to my usual preference, I worked on the front end of the application, but also wrote our internal API for Last.fm integration. It was a really fun experience, in which everyone got on with their own individual parts, and it was good to see the project come together at the end of the weekend.
|
|
||||||
|
|
||||||
The project's source is on [GitHub](https://github.com/encima/eartubes).
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-05-07T17:00:00Z"
|
|
||||||
title: Contribution to Heroku Dev Center
|
|
||||||
description: "Contributing a blog post on direct uploads to S3 to the Heroku Dev Center"
|
|
||||||
tags: [contribution, heroku, python, aws, s3]
|
|
||||||
---
|
|
||||||
|
|
||||||
The [Heroku Dev Center](https://devcenter.heroku.com) is a repository of guides and articles to provide support for those writing applications to be run on the [Heroku](https://heroku.com) platform.
|
|
||||||
|
|
||||||
I recently contributed an article for carrying out [Direct to S3 File Uploads in Python](https://devcenter.heroku.com/articles/s3-upload-python), as I have previously used a very similar approach to interface with Amazon's Simple Storage Service in one of my apps running on Heroku.
|
|
||||||
|
|
||||||
The approach discussed in the article focuses on avoiding as much server-side processing as possible, with the aim of preventing the app's web dynos from becoming too tied up and unable to respond to further requests. This is done by using client-side JavaScript to asynchronously carry out the upload directly to S3 from the web browser. The only necessary server-side processing involves the generation of a temporarily-signed (using existing AWS credentials) request, which is returned to the browser in order to allow the JavaScript to successfully make the final `PUT` request.
|
|
||||||
|
|
||||||
The guide's [companion git repository](https://github.com/willwebberley/FlaskDirectUploader) hopes to demonstrate a simple use-case for this system. As with all of the Heroku Dev Center articles, if you have any feedback (e.g. what could be improved, what helped you, etc.), then please do provide it!
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-05-26T17:00:00Z"
|
|
||||||
title: Gower Tides Open-Sourced
|
|
||||||
description: "Open-sourcing Gower Tides Android app"
|
|
||||||
tags: [android, project, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
This is just a quick post to mention that I have made the source for the [Gower Tides](https://play.google.com/store/apps/details?id=net.willwebberley.gowertides) app on Google Play public.
|
|
||||||
|
|
||||||
The source repository is available on [GitHub](https://github.com/willwebberley/GowerTides). From the repository I have excluded:
|
|
||||||
|
|
||||||
- **Images & icons** - It is not my place to distribute graphics not owned or created by me. Authors are credited in the repo's README and in the application.
|
|
||||||
- **External libraries** - The app requires a graphing package and a class to help with handling locally-packaged SQLite databases. Links to both are also included in the repo's README.
|
|
||||||
- **Tidal data** - The tidal data displayed in the app has also been excluded. However, the format for the data stored by the app should be relatively obvious from its access in the [source](https://github.com/willwebberley/GowerTides/blob/master/src/net/willwebberley/gowertides/utils/DayDatabase.java).
|
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-06-12T17:00:00Z"
|
|
||||||
title: WekaPy
|
|
||||||
description: "Weka bindings for Python"
|
|
||||||
tags: [weka, python, machinelearning, project, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
Over the last few months, I've started to use Weka more and more. [Weka](http://www.cs.waikato.ac.nz/ml/weka/) is a toolkit, written in Java, that I use to create models with which to make classifications on data sets.
|
|
||||||
|
|
||||||
It features a wide variety of different machine learning algorithms (although I've used the logistic regressions and Bayesian networks most) which can be trained on data in order to make classifications (or 'predictions') for sets of instances.
|
|
||||||
|
|
||||||
Weka comes as a GUI application and also as a library of classes for use from the command line or in Java applications. I needed to use it to create some large models and several smaller ones, and using the GUI version makes the process of training the model, testing it with data and parsing the classifications a bit clunky. I needed to automate the process a bit more.
|
|
||||||
|
|
||||||
Nearly all of the development work for my PhD has been in Python, and it'd be nice to just plug in some machine learning processes over my existing code. Whilst there are some wrappers for Weka written for Python ([this project](https://github.com/chrisspen/weka), [PyWeka](https://pypi.python.org/pypi/PyWeka), etc.), most of them feel unfinished, are under-documented or are essentially just instructions on how to use [Jython](http://www.jython.org/).
|
|
||||||
|
|
||||||
So, I started work on [WekaPy](https://github.com/willwebberley/WekaPy), a simple wrapper that allows efficient and Python-friendly integration with Weka. It basically just involves subprocesses to execute Weka from the command line, but also includes several areas of functionality aimed to provide more of a seamless and simple experience to the user.
|
|
||||||
|
|
||||||
I haven't got round to writing proper documentation yet, but most of the current functionality is explained and demo'd through examples [here](https://github.com/willwebberley/WekaPy#example-usage). Below is an example demonstrating its ease of use.
|
|
||||||
|
|
||||||
```
|
|
||||||
model = Model(classifier_type = "bayes.BayesNet")
|
|
||||||
model.train(training_file = "train.arff")
|
|
||||||
model.test(test_file = "test.arff")
|
|
||||||
```
|
|
||||||
|
|
||||||
All that is needed is to instantiate the model with your desired classifier, train it with some training data and then test it against your test data. The predictions can then be easily extracted from the model as shown [in the documentation](https://github.com/willwebberley/WekaPy#accessing-the-predictions).
|
|
||||||
|
|
||||||
I hope to continue updating the library and improving the documentation when I get a chance! Please let me know if you have any ideas for functionality.
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-06-20T17:00:00Z"
|
|
||||||
title: Accidental Kernel Upgrades on Digital Ocean
|
|
||||||
description: "Issues when accidentally upgrading the kernel in Arch Linux on Digital Ocean"
|
|
||||||
tags: [linux, digitalocean, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
I today issued a full upgrade of the server at flyingsparx.net, which is hosted by [Digital Ocean](https://www.digitalocean.com). By default, on Arch, this will upgrade every currently-installed package (where there is a counterpart in the official repositories), including the Linux kernel and the kernel headers.
|
|
||||||
|
|
||||||
Digital Ocean maintain their own kernel versions and do not currently allow kernel switching, which is something I completely forgot. I rebooted the machine and tried re-connecting, but SSH couldn't find the host. Digital Ocean's website provides a console for connecting to the instance (or 'droplet') through VNC, which I used, through which I discovered that none of the network interfaces (except the loopback) were being brought up. I tried everything I could think of to fix this, but without being able to connect the droplet to the Internet, I was unable to download any other packages.
|
|
||||||
|
|
||||||
Eventually, I contacted DO's support, who were super quick in replying. They pointed out that the upgrade may have also updated the kernel (which, of course, it had), and that therefore the modules for networking weren't going to load properly. I restored the droplet from one of the automatic backups, swapped the kernel back using DO's web console, rebooted and things were back to where they should be.
|
|
||||||
|
|
||||||
The fact that these things can be instantly fixed from their console and their quick customer support make Digital Ocean awesome! If they weren't possible then this would have been a massive issue, since the downtime also took out this website and the backend for a couple of mobile apps. If you use an Arch instance, then there is a [community article](https://www.digitalocean.com/community/articles/pacman-syu-kernel-update-solved-how-to-ignore-arch-kernel-upgrades) on their website explaining how to make pacman ignore kernel upgrades and to stop this from happening.
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-07-03T17:00:00Z"
|
|
||||||
title: Magic Seaweed's Awesome New API
|
|
||||||
description: "Making use of the Magic Seaweed web API for surf data"
|
|
||||||
tags: [project, android, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
Back in March, I emailed [Magic Seaweed](http://magicseaweed.com) to ask them if they had a public API for their surf forecast data. They responded that they didn't at the time, but that it was certainly on their to-do list. I am interested in the marine data for my [Gower Tides](https://play.google.com/store/apps/details?id=net.willwebberley.gowertides) application.
|
|
||||||
|
|
||||||
Yesterday, I visited their website to have a look at the surf reports and some photos, when I noticed the presence of a [Developer](http://magicseaweed.com/developer/api) link in the footer of the site. It linked to pages about their new API, with an overview describing exactly what I wanted.
|
|
||||||
|
|
||||||
Since the API is currently in beta, I emailed them requesting a key, which they were quick to respond with and helpfully included some further example request usages. They currently do not have any strict rate limits in place, but instead have a few [fair practice terms](http://magicseaweed.com/developer/terms-and-conditions) to discourage developers from going a bit trigger happy on API requests. They also request that you use a hyperlinked logo to accredit the data back to them. Due to caching, I will not have to make too many requests (since the application will preserve 'stale' data for 30 minutes before refreshing from Magic Seaweed, when requested), so hopefully that will keep the app's footprint down.
|
|
||||||
|
|
||||||
I have written the app's new [backend support](https://github.com/willwebberley/GowerTidesBackend) for handling and caching the surf data ready for incorporating into the Android app soon. So far, the experience has been really good, with the API responding with lots of detailed information - almost matching the data behind their own [surf forecasts](http://magicseaweed.com/Llangennith-Rhossili-Surf-Report/32/). Hopefully they won't remove any of the features when they properly release it!
|
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-07-31T17:00:00Z"
|
|
||||||
title: Gower Tides v1.4
|
|
||||||
description: "Announcing the latest version of Gower Tides Android app"
|
|
||||||
tags: [android, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
![Surf forecasts](https://will.now.sh/static/media/v1-4_surf.png)
|
|
||||||
|
|
||||||
Last week I released a new version of the tides Android app I'm currently developing.
|
|
||||||
|
|
||||||
The idea of the application was initially to simply display the tidal times and patterns for the Gower Peninsula, and that this should be possible without a data connection. Though, as the time has gone by, I keep finding more and more things that can be added!
|
|
||||||
|
|
||||||
The latest update saw the introduction of 5-day surf forecasts for four Gower locations - Llangennith, Langland, Caswell Bay, and Hunts Bay. All the surf data comes from [Magic Seaweed](http://magicseaweed.com)'s API (which I [talked about](/blog/2013/07/03/magic-seaweeds-awesome-new-api/) last time).
|
|
||||||
|
|
||||||
![Location choices](https://flyingsparx.net/static/media/v1-4_location.png)
|
|
||||||
|
|
||||||
The surf forecasts are shown, for each day they are available, as a horizontal scroll-view, allowing users to scroll left and right within that day to view the forecast at different times of the day (in 3-hourly intervals).
|
|
||||||
|
|
||||||
Location selection is handled by a dialog popup, which shows a labelled map and a list of the four available locations in a list view.
|
|
||||||
|
|
||||||
The [backend support](https://github.com/willwebberley/GowerTidesBackend) for the application was modified to now also support 30-minute caching of surf data on a per-location basis (i.e. new calls to Magic Seaweed would not be made if the requested _location_ had been previously pulled in the last 30 minutes). The complete surf and weather data is then shipped back to the phone as one JSON structure.
|
|
||||||
|
|
||||||
![Tides view update](https://flyingsparx.net/static/media/v1-4_tides.png)
|
|
||||||
|
|
||||||
Other updates were smaller but included an overhaul of the UI (the tide table now looks a bit nicer), additional licensing information, more speedy database interaction, and so on.
|
|
||||||
|
|
||||||
If you are interested in the source, then that is available [here](https://github.com/willwebberley/GowerTides), and the app itself is on [Google Play](https://play.google.com/store/apps/details?id=net.willwebberley.gowertides&hl=en). If you have any ideas, feedback or general comments, then please let me know!
|
|
@ -1,22 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-08-31T17:00:00Z"
|
|
||||||
title: A rather French week
|
|
||||||
description: "Away in France for a week"
|
|
||||||
tags: [life, holiday]
|
|
||||||
---
|
|
||||||
|
|
||||||
I recently spent a week in France as part of a holiday with some of my family. Renting houses for a couple of weeks in France or Italy each summer has almost become a bit of a tradition, and it's good to have a relax and a catch-up for a few days. They have been the first proper few days (other than the <a href="/blog/13/3/30/a-bit-of-light-construction-on-an-easter-weekend/" target="_blank">decking-building adventure</a> back in March) I have had away from University in 2013, so I felt it was well-deserved!
|
|
||||||
|
|
||||||
![The house](/media/blog/french-house.JPG)
|
|
||||||
|
|
||||||
This year we stayed in the Basque Country of southern France, relatively near Biarritz, in a country farmhouse. Although we weren't really within walking distance to anywhere, the house did come with a pool in the garden, with a swimmable river just beyond, and an amazing, peaceful setting.
|
|
||||||
|
|
||||||
Strangely enough, there was no Internet installation at the house, and no cellular reception anywhere nearby. This took a bit of getting-used to, but after a while it became quite relaxing not having to worry about checking emails, texts, and Twitter. The only thing to cause any stress was a crazed donkey, living in the field next door, who would start braying loudly at random intervals through the nights, waking everyone up.
|
|
||||||
|
|
||||||
![French Gorge](/media/blog/french-gorge.JPG)
|
|
||||||
|
|
||||||
As might be expected, the food and drink was exceptional. Although we did end up eating in the house each evening (to save having someone sacrifice themselves to be the designated driver), the foods we bought from the markets were very good, and the fact that wine cost €1.50 per bottle from the local Intermarché gave very little to complain about.
|
|
||||||
|
|
||||||
The majority of most days was spent away from the house, visiting local towns, the beaches and the Pyrenees. We spent a few afternoons walking in the mountains, with some spectacular scenery.
|
|
||||||
|
|
||||||
![Pyrenees](/media/blog/french-pyrenes.JPG)
|
|
@ -1,26 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-09-02T17:00:00Z"
|
|
||||||
title: "Zoned Network Sound-Streaming: The Problem"
|
|
||||||
description: "Multi-room audio simultaneous playback"
|
|
||||||
tags: [linux, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
For a while, now, I have been looking for a reliable way to manage zoned music-playing around the house. The general idea is that I'd like to be able to play music from a central point and have it streamed over the network to a selection of receivers, which could be remotely turned on and off when required, but still allow for multiple receivers to play simultaneously.
|
|
||||||
|
|
||||||
Apple's [AirPlay](http://www.apple.com/uk/airplay/) has supported this for a while now, but requires the purchasing of AirPlay compatible hardware, which is expensive. It's also very iTunes-based - which is something that I do not use.
|
|
||||||
|
|
||||||
Various open-source tools also allow network streaming. [Icecast](http://www.icecast.org/) (through the use of [Darkice](https://code.google.com/p/darkice/)) allows clients to stream from a multimedia server, but this causes pretty severe latency in playback between clients (ranging up to around 20 seconds, I've found) - not a good solution in a house!
|
|
||||||
|
|
||||||
[PulseAudio](http://www.freedesktop.org/wiki/Software/PulseAudio/) is partly designed around being able to work over the network, and supports the discovery of other PulseAudio sinks on the LAN and the selection of a sound card to transmit to through TCP. This doesn't seem to support multiple sound card sinks very well, however.
|
|
||||||
|
|
||||||
PulseAudio's other network feature is its RTP broadcasting, and this seemed the most promising avenue for progression in solving this problem. RTP utilises UDP, and PulseAudio effectively uses this to broadcast its sound to any devices on the network that might be listening on the broadcast address. This means that one server could be run and sink devices could be set up simply to receive the RTP stream on demand - perfect!
|
|
||||||
|
|
||||||
However, in practice, this turned out not to work very well. With RTP enabled, PulseAudio would entirely flood the network with sound packets. Although this isn't a problem for devices with a wired connection, any devices connected wirelessly to the network would be immediately disassociated from the access point due to the complete saturation of PulseAudio's packets being sent over the airwaves.
|
|
||||||
|
|
||||||
This couldn't be an option in a house where smartphones, games consoles, laptops, and so on require the WLAN. After researching this problem a fair bit (and finding many others experiencing the same issues), I found [this page](http://www.freedesktop.org/wiki/Software/PulseAudio/Documentation/User/Network/RTP/), which describes various methods for using RTP streaming from PulseAudio and includes (at the bottom) the key that could fix my problems - the notion of compressing the audio into MP3 format (or similar) before broadcasting it.
|
|
||||||
|
|
||||||
Trying this technique worked perfectly, and did not cause network floods anywhere nearly as severely as the uncompressed sound stream; wireless clients no longer lost access to the network once the stream was started and didn't seem to lose any noticeable QoS at all. In addition, when multiple clients connected, the sound output would be nearly entirely simultaneous (at least after a few seconds to warm up).
|
|
||||||
|
|
||||||
Unfortunately, broadcasting still didn't work well over WLAN (sound splutters and periodic drop-outs), so the master server and any sound sinks would need to be on a wired network. This is a small price to pay, however, and I am happy to live with a few Ethernet-over-power devices around the house. The next stage is to think about what to use as sinks. Raspberry Pis should be powerful enough and are _significantly_ cheaper than Apple's equivalent. They would also allow me to use existing sound systems in some rooms (e.g. the surround-sound in the living room), and other simple speaker setups in others. I also intend to write a program around PulseAudio to streamline the streaming process and a server for discovering networked sinks.
|
|
||||||
|
|
||||||
I will write an update when I have made any more progress on this!
|
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-09-14T17:00:00Z"
|
|
||||||
title: CasaStream
|
|
||||||
description: "Discussing a solution for multi-room synchronous audio playback"
|
|
||||||
tags: [project, linux, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
In my [last post](/blog/2013/09/02/zoned-network-sound-streaming-the-problem) I discussed methods for streaming music to different zones in the house. More specifically I wanted to be able to play music from one location and then listen to it in other rooms at the same time and in sync.
|
|
||||||
|
|
||||||
After researching various methods, I decided to go with using a compressed MP3 stream over RTP. Other techniques introduced too much latency, did not provide the flexibility I required, or simply did not fulfill the requirements (e.g. not multiroom, only working with certain applications and non-simultaneous playback).
|
|
||||||
|
|
||||||
To streamline the procedure of compressing the stream, broadcasting the stream, and receiving and playing the stream, I have started a project to create an easily-deployable wrapper around PulseAudio and VLC. The system, somewhat cheesily named [CasaStream](https://github.com/willwebberley/CasaStream) and currently written primarily in Python, relies on a network containing one machine running a CasaStream Master server and any number of machines running a CasaStream Slave server.
|
|
||||||
|
|
||||||
![Casastream interface](/media/blog/casastream1.png)
|
|
||||||
|
|
||||||
The Master server is responsible for compressing and broadcasting the stream, and the Slaves receive and play the stream back through connected speakers. Although the compression is relatively resource-intensive (at least, for the moment), the Slave server is lightweight enough to be run on low-powered devices, such as the Raspberry Pi. Any machine that is powerful enough to run the Master could also simultaneously run a Slave, so a dedicated machine to serve the music alone is not required.
|
|
||||||
|
|
||||||
![Casastream interface](/media/blog/casastream2.png)
|
|
||||||
|
|
||||||
The Master server also runs a web interface, allowing enabling of the system and to disable and enable Slaves. Slave servers are automatically discovered by the Master, though it is possible to alter the scan range from the web interface also. In addition, the selection of audio sources to stream (and their output volumes) and the renaming of Slaves are available as options. Sound sources are usually automatically detected by PulseAudio (if it is running), so there is generally no manual intervention required to 'force' the detection of sources.
|
|
||||||
|
|
||||||
My current setup consists of a Master server running on a desktop machine in the kitchen, and Slave servers running on various other machines throughout the house (including the same kitchen desktop connected to some orbital speakers and a Raspberry Pi connected to the surround sound in the living room). When all running, there is no notable delay between the audio output in the different rooms.
|
|
||||||
|
|
||||||
There are a few easily-installable dependencies required to run both servers. Both require Python (works on V2.*, but I haven't tested on V3), and both require the Flask microframework and VLC. For a full list, please see the [README](https://github.com/willwebberley/CasaStream/blob/master/README.md) at the project's home, which also provides more information on the installation and use.
|
|
||||||
|
|
||||||
Unfortunately, there are a couple of caveats: firstly, the system is not reliable over WLAN (the sound gets pretty choppy), so a wired connection is recommended. Secondly, if using ethernet-over-power to mitigate the first caveat, then you may experience sound dropouts every 4-5 minutes. To help with this problem, the Slave servers are set to restart the stream every four minutes (by default).
|
|
||||||
|
|
||||||
This is quite an annoying issue, however, since having short sound interruptions every few minutes is very noticeable. Some of my next steps with this project, therefore, are based around trying to find a better fix for this. In addition, I'd like to reduce the dependency footprint (the Slave servers really don't need to use a fully-fledged web server), reduce the power requirements at both ends, and to further automate the installation process.
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2013-10-05T17:00:00Z"
|
|
||||||
title: Workshop Presentation in Germany
|
|
||||||
description: "Presenting research in Germany"
|
|
||||||
tags: [talk, research]
|
|
||||||
---
|
|
||||||
|
|
||||||
Last week I visited Karlsruhe, in Germany, to give a presentation accompanying a recently-accepted paper. The paper, "Inferring the Interesting Tweets in Your Network", was in the proceedings of the Workshop on Analyzing Social Media for the Benefit of Society ([Society 2.0](http://www.cs.cf.ac.uk/cosmos/node/12)), which was part of the Third International Conference on Social Computing and its Applications ([SCA](http://socialcloud.aifb.uni-karlsruhe.de/confs/SCA2013/)).
|
|
||||||
|
|
||||||
Although I only attended the first workshop day, there was a variety of interesting talks on social media and crowdsourcing. My own talk went well and there was some useful feedback from the attendees.
|
|
||||||
|
|
||||||
I presented my recent work on the use of machine learning techniques to help in identifying interesting information in Twitter. I rounded up some of the results from the Twinterest experiment we ran a few months ago and discussed how this helped address the notion of information _relevance_ as an extension to global _interestingness_.
|
|
||||||
|
|
||||||
I hadn't been to Germany before this, so it was also a culturally-interesting visit. I was only there for two nights but I tried to make the most of seeing some of Karlsruhe and enjoying the traditional food and local beers!
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2014-01-17T17:00:00Z"
|
|
||||||
title: Direct-to-S3 Uploads in Node.js
|
|
||||||
description: "Uploading assets directly to S3 using Node.js"
|
|
||||||
tags: [heroku, javascript, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
A while ago I wrote an [article](https://devcenter.heroku.com/articles/s3-upload-python) for [Heroku](https://heroku.com)'s Dev Center on carrying out direct uploads to S3 using a Python app for signing the PUT request. Specifically, the article focussed on Flask but the concept is also applicable to most other Python web frameworks.
|
|
||||||
|
|
||||||
I've recently had to implement something similar, but this time as part of an [Node.js](http://nodejs.org) application. Since the only difference between the two approaches is literally just the endpoint used to return a signed request URL, I thought I'd post an update on how the endpoint could be constructed in Node.
|
|
||||||
|
|
||||||
The front-end code in the companion repository demonstrates an example of how the endpoint can be queried to retrieve the signed URL, and is available [here](https://github.com/willwebberley/FlaskDirectUploader/blob/master/templates/account.html). Take a look at that repository's README for information on the front-end dependencies.
|
|
||||||
|
|
||||||
The full example referenced by the Python article is in a [repository](https://github.com/willwebberley/FlaskDirectUploader) hosted by GitHub and may be useful in providing more context.
|
|
@ -1,16 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2014-01-28T17:00:00Z"
|
|
||||||
title: Seminar at King's College London
|
|
||||||
description: "Giving a seminar on my research at KCL"
|
|
||||||
tags: [talk, kcl, research]
|
|
||||||
---
|
|
||||||
|
|
||||||
Last week, I was invited to give a seminar to the Agents and Intelligent Systems group in the [Department of Informatics](http://www.kcl.ac.uk/nms/depts/informatics/index.aspx) at King's College London.
|
|
||||||
|
|
||||||
I gave an overview of my PhD research conducted over the past two or three years, from my initial research into retweet behaviours and propagation characteristics through to studies on the properties exhibited by Twitter's social graph and the effects that the interconnection of users have on message dissemination.
|
|
||||||
|
|
||||||
I finished by outlining our methods for identifying interesting content on Twitter and by demonstrating its relative strengths and weaknesses as were made clear by crowd-sourced validations carried out on the methodology results.
|
|
||||||
|
|
||||||
There were some very interesting and useful questions from the audience, some of which are now being taken into consideration in my thesis. It was also good to visit another computer science department and to hear about the work done independently and collaboratively by its different research groups.
|
|
||||||
|
|
||||||
The slides from the seminar are available [here](http://flyingsparx.net/static/downloads/kcl_seminar_2014.pdf) and there is a [blog post](http://inkings.org/2014/02/03/tweets-and-retweets) about it on the Department of Informatics' website.
|
|
@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2014-03-17T17:00:00Z"
|
|
||||||
title: Node.js Contribution to Heroku's Dev Center
|
|
||||||
description: "Contributing another article to the Heroku Dev Center"
|
|
||||||
tags: [contribution, heroku, javascript]
|
|
||||||
---
|
|
||||||
|
|
||||||
I recently wrote a new article for Heroku's Dev Center on carrying out asynchronous direct-to-S3 uploads using Node.js.
|
|
||||||
|
|
||||||
The article is based heavily on the previous [Python version](/blog/13/5/7/contribution-to-heroku-dev-center/), where the only major change is the method for signing the AWS request. This method was outlined in an [earlier blog post](/blog/2014/1/17/direct-to-s3-uploads-in-node.js).
|
|
||||||
|
|
||||||
The article is available [here](https://devcenter.heroku.com/articles/s3-upload-node) and there is also a [companion code repository](https://github.com/willwebberley/NodeDirectUploader) for the example it describes.
|
|
@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2014-03-26T17:00:00Z"
|
|
||||||
title: Talk on Open-Source Contribution
|
|
||||||
description: "Internal seminar on contributing to open-source projects"
|
|
||||||
tags: [talk, opensource]
|
|
||||||
---
|
|
||||||
|
|
||||||
Today I gave an internal talk at the School of Computer Science & Informatics about open-source contribution.
|
|
||||||
|
|
||||||
The talk described some of the disadvantages of the ways in which hobbyists and the non-professional sector publicly publish their code. A lot of the time these projects do not receive much visibility or use from others.
|
|
||||||
|
|
||||||
Public contribution is important to the open-source community, which is driven largely by volunteers and enthusiasts, so the point of the talk was to try and encourage people to share expert knowledge through contributing documentation (wikis, forums, articles, etc.), maintaining and adopting packages, and getting more widely involved.
|
|
@ -1,12 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-01-20T17:00:00Z"
|
|
||||||
title: End of an Era
|
|
||||||
description: "Completing my PhD"
|
|
||||||
tags: [life, phd, research]
|
|
||||||
---
|
|
||||||
|
|
||||||
I recently received confirmation of my completed PhD! I submitted my thesis in May 2014, passed my viva in September and returned my final corrections in December.
|
|
||||||
|
|
||||||
I was examined internally by [Dr Pete Burnap](http://burnap.org) and also by [Dr Jeremy Pitt](http://www.iis.ee.ic.ac.uk/~j.pitt/Home.html) of Imperial College London.
|
|
||||||
|
|
||||||
The whole PhD was an amazing experience, even during the more stressful moments. I learnt a huge amount across many domains and I cannot thank my supervisors, [Dr Stuart Allen](http://users.cs.cf.ac.uk/Stuart.M.Allen) and [Prof Roger Whitaker](http://users.cs.cf.ac.uk/R.M.Whitaker), enough for their fantastic support and guidance throughout.
|
|
@ -1,26 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-01-27T17:00:00Z"
|
|
||||||
title: NHS Hack Day
|
|
||||||
description: "Taking part in the 2015 NHS Hack Day"
|
|
||||||
tags: [event, nhs]
|
|
||||||
---
|
|
||||||
|
|
||||||
This weekend I took part in the [NHS Hack Day](http://nhshackday.com). The idea of the event is to bring healthcare professionals together with technology enthusiasts in order to build stuff that is useful for those within the NHS and for those that use it. It was organised by [AnneMarie Cunningham](https://twitter.com/amcunningham), who did a great job in making the whole thing run smoothly!
|
|
||||||
|
|
||||||
![NHS Hack Day](/media/blog/nhshackday2.jpg)
|
|
||||||
|
|
||||||
**This was our team! The image is released under a Creative Commons BY-NC2.0 license by [Paul Clarke](https://www.flickr.com/photos/paul_clarke).**
|
|
||||||
|
|
||||||
I was asked to go along and give a hand by [Martin](http://martinjc.com), who also had four of his MSc students with him. [Matt](http://mattjw.net), previously from [Cardiff CS&I](http://cs.cf.ac.uk), also came to provide his data-handling expertise.
|
|
||||||
|
|
||||||
![NHS Hack Day 2](/media/blog/nhshackday.png)
|
|
||||||
|
|
||||||
We built a webapp, called [Health Explorer Wales](http://compjcdf.github.io/nhs_hack/app.html), that attempts to visualise various data for health boards and communities in Wales. One of the main goals of the app was to make it maintainable, so that users in future could easily add their own geographic or numeric data to visualise. For this, it was important to decide on an extensible [data schema](https://github.com/CompJCDF/nhs_hack/blob/master/data/descriptors.json) for describing data, and suitable data formats.
|
|
||||||
|
|
||||||
Once the schema was finalised, we were able to go ahead and build the front-end, which used [D3.js](http://d3js.org) to handle the visualisations. This was the only third-party library we used in the end. The rest of the interface included controls, such as a dataset-selector and controls for sliding back through time (for timeseries data). The app is purely front-end, which means it can essentially be shipped as a single HTML file (with linked scripts and styles).
|
|
||||||
|
|
||||||
We also included an 'add dataset' feature, which allows users to add a dataset to be visualised, as long as the schema is observed. In true hackathon style, any exceptions thrown will currently cause the process to fail silently ;) The [GitHub repository](https://github.com/CompJCDF/nhs_hack) for the app contains a wiki with some guidance on data-formatting. Since the app is front-end only, any data added is persisted using HTML5 local storage and is therefore user-specific.
|
|
||||||
|
|
||||||
Generally, I am pleased with the result. The proof-of-concept is (mostly) mobile-friendly, and allows for easily showing off data in a more comprehensible way than through just using spreadsheets. Although we focussed on visualising only two datatypes initially (we all <3 [#maps](https://twitter.com/_r_309)), we hope to extend this by dropping in modules for supporting new formats in the future.
|
|
||||||
|
|
||||||
There were many successful projects completed as part of the event, including a new 'eye-test' concept involving a zombie game using an Oculus Rift and an app for organising group coastal walks around Wales. A full list of projects is available on the event's [website](http://nhshackday.com/previous/events/2015/01/cardiff). I really enjoyed the weekend and hope to make the next one in London in May!
|
|
@ -1,10 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-02-05T17:00:00Z"
|
|
||||||
title: Developing Useful APIs for the Web
|
|
||||||
description: "Internal seminar on developing useful and effective web APIs"
|
|
||||||
tags: [talk, webapi]
|
|
||||||
---
|
|
||||||
|
|
||||||
Yesterday, I gave a talk about my experiences with developing and using RESTful APIs, with the goal of providing tips for structuring such interfaces so that they work in a useful and sensible way.
|
|
||||||
|
|
||||||
I went back to first principles, with overviews of basic HTTP messages as part of the request-response cycle and using sensible status codes in HTTP responses. I discussed the benefits of 'collection-oriented' endpoint URLs to identify resources that can be accessed and modified and the use of HTTP methods to describe what to do with these resources.
|
|
@ -1,18 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-02-18T17:00:00Z"
|
|
||||||
title: Web and Social Computing
|
|
||||||
description: "Lecturing masters students on Web and Social Computing"
|
|
||||||
tags: [cardiffuniversity, teaching]
|
|
||||||
---
|
|
||||||
|
|
||||||
This week I begin lecturing a module for [Cardiff School of Computer Science and Informatics](http://cs.cf.ac.uk)' postgraduate MSc course in [Advanced Computer Science](http://courses.cardiff.ac.uk/postgraduate/course/detail/p071.html).
|
|
||||||
|
|
||||||
The module is called Web and Social Computing, with the main aim being to introduce students to the concepts of social computing and web-based systems. The course will include both theory and practical sessions in order to allow them to enhance their knowledge derived from literature with the practice of key concepts. We'll also have lots of guest lectures from experts in specific areas to help reinforce the importance of this domain.
|
|
||||||
|
|
||||||
As part of the module, I will encourage students to try and increase their web-presence and to interact with a wider community on the Internet. They'll do this by engaging more with social media and by maintaining a blog on things they've learned and researched.
|
|
||||||
|
|
||||||
Each week, the students will give a 5-minute [Ignite-format](http://en.wikipedia.org/wiki/Ignite_%28event%29) talk on the research they've carried out. The quick presentation style will allow everyone in the group to convey what they feel are the most important and relevant parts in current research across many of the topics covered in the module.
|
|
||||||
|
|
||||||
We'll cover quite a diverse range of topics, starting from an introduction to networks and a coverage of mathematical graph theory. This will lead on to social networks, including using APIs to harvest data in useful ways. Over the last few weeks, we'll delve into subjects around socially-driven business models and peer-to-peer finance systems, such as BitCoin.
|
|
||||||
|
|
||||||
During the course, I hope that students will gain practical experience with various technologies, such as [NetworkX](https://networkx.github.io) for modelling and visualising graphs in Python, [Weka](http://www.cs.waikato.ac.nz/ml/weka) for some machine learning and classification, and good practices for building and using web APIs.
|
|
@ -1,29 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-04-28T17:00:00Z"
|
|
||||||
title: Media and volume keys in i3
|
|
||||||
description: "Keybinds for media control and volume in i3 window manager"
|
|
||||||
tags: [linux, i3, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
As is the case with many people, all music I listen to on my PC these days plays from the web through a browser. I'm a heavy user of Google Play Music and SoundCloud, and using Chrome to handle everything means playlists and libraries (and the way I use them through extensions) sync up properly everywhere I need them.
|
|
||||||
|
|
||||||
On OS X I use [BeardedSpice](http://beardedspice.com) to map the keyboard media controls to browser-based music-players, and the volume keys adjust the system as they should. Using [i3](https://i3wm.org) (and other lightweight window managers) can make you realise what you take for granted when using more fully-fledged arrangements, but it doesn't take long to achieve the same functionality on such systems.
|
|
||||||
|
|
||||||
A quick search revealed [keysocket](https://github.com/borismus/keysocket) - a Chrome extension that listens out for the hardware media keys and is able to interact with a large list of supported music websites. In order to get the volume controls working, I needed to map i3 through to `alsa`, and this turned out to be pretty straight-forward too. It only required the addition of three lines to my i3 config to handle the volume-up, volume-down, and mute keys:
|
|
||||||
|
|
||||||
```
|
|
||||||
bindsym XF86AudioRaiseVolume exec amixer -q set Master 4%+ unmute
|
|
||||||
bindsym XF86AudioLowerVolume exec amixer -q set Master 4%- unmute
|
|
||||||
bindsym XF86AudioMute exec amixer -q set Master toggle
|
|
||||||
```
|
|
||||||
|
|
||||||
And for fun added the block below to `~/.i3status.conf` to get the volume displayed on the status bar:
|
|
||||||
|
|
||||||
```
|
|
||||||
volume master {
|
|
||||||
format = "♪ %volume "
|
|
||||||
device = "default"
|
|
||||||
mixer = "Master"
|
|
||||||
mixer_idx = 0
|
|
||||||
}
|
|
||||||
```
|
|
@ -1,25 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-05-01T17:00:00Z"
|
|
||||||
title: Using Weka in Go
|
|
||||||
description: "Weka bindings for Go"
|
|
||||||
tags: [weka, golang, machinelearning, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
A couple of years ago I wrote a [blog post](/blog/13/6/12/wekapy) about wrapping some of [Weka](http://www.cs.waikato.ac.nz/ml/weka)'s classification functionality to allow it to be used programmatically in Python programs. A small project I'm currently working on at home is around taking some of the later research from my PhD work to see if it can be expressed and used as a simple web-app.
|
|
||||||
|
|
||||||
I began development in [Go](https://golang.org) as I hadn't yet spent much time working with the language. The research work involves using a Bayesian network classifier to help infer a [tweet's interestingness](http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6686092&tag=1), and while Go machine-learning toolkits do [exist](http://biosphere.cc/software-engineering/go-machine-learning-nlp-libraries), I wanted to use my existing models that were serialized in Java by Weka.
|
|
||||||
|
|
||||||
I started working on [WekaGo](https://github.com/willwebberley/WekaGo), which is able to programmatically support simple classification tasks within a Go program. It essentially just manages the model, abstracts the generation of [ARFF](http://www.cs.waikato.ac.nz/ml/weka/arff.html) files, and executes the necessary Java to make it quick and easy to train and classify data:
|
|
||||||
|
|
||||||
```
|
|
||||||
model := wekago.NewModel("bayes.BayesNet")
|
|
||||||
...
|
|
||||||
model.AddTrainingInstance(train_instance1)
|
|
||||||
...
|
|
||||||
model.Train()
|
|
||||||
model.AddTestingInstance(train_instance1)
|
|
||||||
...
|
|
||||||
model.Test()
|
|
||||||
```
|
|
||||||
|
|
||||||
Results from the classification can then be examined, as [described](https://github.com/willwebberley/WekaGo/blob/master/README.md).
|
|
@ -1,36 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-05-12T17:00:00Z"
|
|
||||||
title: Nintendo's Hotspot 'API'
|
|
||||||
description: "Using Nintendo's 3DS Hotspot API for Streetpass and Spotpass"
|
|
||||||
tags: [android, nintendo, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
Since getting a DS, [StreetPass](http://www.nintendo.com/3ds/built-in-software/streetpass) has become quite addictive. It's actually pretty fun checking the device after walking through town or using public transport to see a list of Miis representing the people you've been near recently, and the minigames (such as StreetPass Quest) that require you to 'meet' people in order to advance also make it more involved. Essentially the more you're out and about, the further you can progress - this is further accentuated through Play Coins, which can be used to help 'buy' your way forward and are earned for every 100 steps taken whilst holding the device.
|
|
||||||
|
|
||||||
![Nintendo Zone](/media/blog/nintendozone2.png)
|
|
||||||
|
|
||||||
The DS systems can also use relay points in Nintendo Zone hotspots to collect StreetPass hits. These zones are special WiFi access points hosted in certain commercial venues (e.g. in McDonalds and Subway restaurants), and allow you to 'meet' people around the world who also happen to be in another Nintendo Zone at the same time. As such, users can get a lot of hits very quickly (up to a maximum of 10 at a time). There are various ways people have [found](https://gbatemp.net/threads/how-to-have-a-homemade-streetpass-relay.352645) to set up a 'home' zone, but Nintendo have also published a [map](https://microsite.nintendo-europe.com/hotspots) to display official nearby zones.
|
|
||||||
|
|
||||||
However, their map seems a little clunky to use while out and about, so I wanted to see if there could be an easier way to get this information more quickly. When using the map, the network logs revealed `GET` requests being made to:
|
|
||||||
|
|
||||||
```
|
|
||||||
https://microsite.nintendo-europe.com/hotspots/api/hotspots/get
|
|
||||||
```
|
|
||||||
|
|
||||||
The location for which to retrieve data is specified through the `zoom` and `bbox` parameters, which seem to map directly to the zoom level and the bounds reported by the underlying Google Maps API being used. For some reason, the parameter `summary_mode=true` also needs to be set. As such, an (unencoded) request for central Cardiff may look like this:
|
|
||||||
|
|
||||||
```
|
|
||||||
/hotspots/api/hotspots/get?summary_mode=true&zoom=18&bbox=51.480043,-3.180592,51.483073,-3.173028
|
|
||||||
```
|
|
||||||
|
|
||||||
Where the coordinates (`51.480043,-3.180592`) and (`51.483073,-3.173028`) respectively represent the lower-left and upper-right corners of the bounding box. The response is in JSON, and contains a lat/lng for each zone, a name, and an ID that can be used to retrieve more information about the host's zone using this URL format:
|
|
||||||
|
|
||||||
```
|
|
||||||
https://microsite.nintendo-europe.com/hotspots/#hotspot/<ID>
|
|
||||||
```
|
|
||||||
|
|
||||||
When the map is zoomed-out (to prevent map-cluttering) a zone 'group' might be returned instead of an individual zone, for each of which the size is indicated. Zooming back in to a group then reveals the individual zones existing in that area.
|
|
||||||
|
|
||||||
![Nintendo Zone 2](/media/blog/nintendozone1.png)
|
|
||||||
|
|
||||||
It seems that this server endpoint does not support cross-origin resource-sharing (CORS), which means that the data is not retrievable for a third-party web-app (at least, without some degree of proxying) due to browser restrictions. However, and especially since the endpoint currently requires no session implementation or other kind of authentication, the data seems very easily retrievable and manageable for non-browser applications and other types of systems.
|
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2015-05-27T17:00:00Z"
|
|
||||||
title: "Android: Consuming Nintendo Hotspot Data"
|
|
||||||
description: "Using the Nintendo Streetpass API in an Android app"
|
|
||||||
tags: [android, nintendo, project, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
I recently [blogged about](/blog/2015/5/12/nintendos-hotspot-api) Nintendo Hotspot data and mentioned it could be more usefully consumable in a native mobile app.
|
|
||||||
|
|
||||||
![Android Hotspot](/media/blog/android-hotspot.png)
|
|
||||||
|
|
||||||
As such, I wrote a small Android app for retrieving this data and displaying it on a Google Map. The app shows nearby hotspots, allows users to also search for other non-local places, and shows information on the venue hosting the zone.
|
|
||||||
|
|
||||||
The app is available on the [Play Store](https://play.google.com/store/apps/details?id=net.flyingsparx.spotpassandroid) and its source is published on [GitHub](https://github.com/willwebberley/NZone-finder).
|
|
@ -1,30 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2017-03-16T17:00:00Z"
|
|
||||||
title: Two Year Update
|
|
||||||
description: "Updating my blog after two years"
|
|
||||||
tags: [travel, life]
|
|
||||||
---
|
|
||||||
|
|
||||||
I haven't written a post since summer 2015. It's now March 2017 and I thought I'd write an update very briefly covering the last couple of years.
|
|
||||||
|
|
||||||
I finished researching and lecturing full-time in the summer of 2015. It felt like the end of an era; I'd spent around a third of my life at the [School of Computer Science and Informatics](http://www.cardiff.ac.uk/computer-science) at [Cardiff University](http://cf.ac.uk), and had experienced time there as an undergraduate through to postgrad and on to full-time staff. However, I felt it was time to move on and to try something new, although I was really pleased to be able to continue working with them on a more casual part-time basis - something that continues to today.
|
|
||||||
|
|
||||||
In that summer after leaving full-time work at Cardiff I went [interailing](http://www.interrail.eu) around Europe with my friend, Dan. It was an amazing experience through which I had a taste of many new European cities where we met lots of interesting people. We started by flying out to Berlin, and from there our route took us through Prague, Krakow, Budapest, Bratislava, Vienna, Munich, Koblenz, Luxembourg City, Brussels, Antwerp, and then finished in Amsterdam (which I'd been to before, but always love visiting).
|
|
||||||
|
|
||||||
![Interailing](/media/blog/interrailing.png)
|
|
||||||
|
|
||||||
_Some photos from the Interrail trip._
|
|
||||||
|
|
||||||
After returning, I moved to London to start a new full-time job with [Chaser](https://www.chaser.io). Having met the founders David and Mark at a previous [Silicon Milkroundabout](https://www.siliconmilkroundabout.com), Chaser was so great to get involved with - I was part of a fab team creating fin-tech software with a goal to help boost the cashflows in small-medium sized businesses. Working right in the City was fun and totally different to what seemed like a much quieter life in Cardiff. Whilst there, I learned loads more about web-based programming and was able to put some of the data-analysis skills from my PhD to use.
|
|
||||||
|
|
||||||
At the end of 2015 I moved back to South Wales to begin a new job at [Simply Do Ideas](https://simplydo.co.uk) as a senior engineer. Again, this was a totally different experience involving a shift from fin-tech to ed-tech and a move from the relentless busy-ness of London to the quieter (but no less fun) life of Caerphilly - where our offices were based. Since I was to head the technical side of the business, I was able to put my own stamp on the company and the product, and was able to help decide its future and direction.
|
|
||||||
|
|
||||||
![Simply Do team](/media/blog/sdi_bett.jpg)
|
|
||||||
|
|
||||||
_Myself and Josh representing Simply Do Ideas at Bett 2017 in London._
|
|
||||||
|
|
||||||
In February 2016 I was honoured to be promoted to the Simply Do Ideas board and to have been made the company's Chief Technology Officer. Over the last year myself and the rest of the team have been proud to be part of a company growing very highly respected in a really interesting and exciting domain, and we're all very excited about what's to come in the near (and far) future!
|
|
||||||
|
|
||||||
I still continue to work with Cardiff University on some research projects and to help out with some of the final-year students there, I hope to write a little more about this work soon.
|
|
||||||
|
|
||||||
I feel so lucky to have been able to experience so much in such a short time frame - from academic research and teaching, being a key part of two growth startups, heading a tech company's technology arm, being a member of a board along with very highly-respected and successful entrepreneurs and business owners, and getting to meet such a wide range of great people. I feel like I've grown and learned so much - both professionally and personally - from all of my experiences and from everyone I've met along the way.
|
|
@ -1,36 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-01-20T17:00:00Z"
|
|
||||||
title: "Project Gemini"
|
|
||||||
description: "An introduction to the Gemini protocol and Gemini Space. Gemini clients, capsules, and search."
|
|
||||||
tags: [100daystooffload, gemini, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
Over the past few months I have been trying to use centralised "big tech" social media platforms less and instead immerse myself into the more community-driven "fediverse" of decentralised services that are connected ("federated") using common protocols (e.g. [ActivityPub](https://en.wikipedia.org/wiki/ActivityPub)). If you like, you can follow me on Mastodon ([@wilw@fosstodon.org](https://fosstodon.org/@wilw), recently migrated over from my [old mastodon.social account](https://mastodon.social/@will88)) and Pixelfed ([@wilw@pixelfed.social](https://pixelfed.social/@wilw)).
|
|
||||||
|
|
||||||
I've loved spending my time on these platforms - mainly due to the lack of noise and fuss, and more of a focus on sharing relevant content and interesting interactions with likeminded people (though of course this does depend on the [instance you join](https://joinmastodon.org)).
|
|
||||||
|
|
||||||
One of the things I've seen talked about more and more is [Gemini](https://gemini.circumlunar.space) - and having learned about it and participated myself - I have come to love the ideas behind it.
|
|
||||||
|
|
||||||
Some people will remember the [Gopher protocol](https://en.wikipedia.org/wiki/Gopher_(protocol)) - a lighter alternative to the web that was ultimately sidelined by most of the world in favour of the HTTP World-Wide Web in the early 90s. The [Gemini protocol](https://en.wikipedia.org/wiki/Gemini_(protocol)) is newer, having started in 2019, but is inspired by Gopher. In particular, it aims to solve some of the problems experienced by the modern HTTP(S) web we know today - around complexity, privacy, and "bloat" - and focuses on providing a graph of usefully connected _content_.
|
|
||||||
|
|
||||||
Gemini "sites" (known as "capsules" or "stars"/"starships") - the resources that form [Geminispace](https://en.wikipedia.org/wiki/Gemini_space) - are located using the `gemini://` URL scheme. Servers typically listen on port 1965: a homage to [NASA's Gemini Project](https://en.wikipedia.org/wiki/Project_Gemini). Gemini text resources are similar to traditional HTML web pages in the sense that they can include links to other resources and provide structure and hierarchy through a markdown-like syntax. All Gemini resources must also be transferred using TLS.
|
|
||||||
|
|
||||||
**The image below shows my own capsule (found at [gemini://wilw.capsule.town](gemini://wilw.capsule.town)). If you can't open that link yet then read to the end of this post.**
|
|
||||||
|
|
||||||
![My Gem Capsule running in the Amfora client](/media/blog/amfora.png)
|
|
||||||
|
|
||||||
However, there are also significant differences. These `.gmi` files (and files with similar extensions served with the `text/gemini` MIME type) cannot indicate any styling instructions (as HTML often does with CSS) - and instead leaves the display and rendering of the file up to the client. In addition, whilst images can be served over the protocol, they cannot be included in (and rendered within) Gemini text files like they can in HTML. Similarly, there is no client-side dynamicity for these resources, such as the JavaScript included with most HTML web pages. It's simple; the client just renders whatever is provided by the server, and that's it.
|
|
||||||
|
|
||||||
The simplicity of the protocol - without styling, embedded images, client-side scripts, and more - offers a lightweight, ad-free, and content-oriented experience that is also available for low-powered devices and machines on slower networks. There is more of a focus on privacy (servers can't track or fingerprint you beyond knowing your IP address), and the relative "smallness" of it and absence of big-tech presence certainly brings back some of the fun and novelty of the early web as we remember it.
|
|
||||||
|
|
||||||
I certainly recommend visiting the [project's website](https://gemini.circumlunar.space) for more useful information.
|
|
||||||
|
|
||||||
## Getting involved
|
|
||||||
|
|
||||||
Since Gemini resources are not served using HTTP(S), you can't access them using a normal web browser (although you can use HTTP proxies such as [Mozz's Portal](https://portal.mozz.us/gemini/wilw.capsule.town)).
|
|
||||||
|
|
||||||
Instead, you'll need a Gemini client. I use [Amfora](https://github.com/makeworld-the-better-one/amfora) on my Mac and the [Gemini Browser](https://apps.apple.com/gb/app/the-gemini-browser/id1514950389) on my phone.
|
|
||||||
|
|
||||||
Once you have a client, you can view my own capsule by visiting [gemini://wilw.capsule.town](gemini://wilw.capsule.town). If you're interested in starting your own and want to see how mine is formed, you can view the [project source](https://git.wilw.dev/wilw/gemini-capsule).
|
|
||||||
|
|
||||||
I also recommend trying out the Gemini search-engine to discover what else lies in Geminispace. It is located at [gemini://gus.guru](gemini://gus.guru).
|
|
@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-01-29T17:46:00Z"
|
|
||||||
title: "100 Days to Offload Challenge"
|
|
||||||
description: "In 2021 I hope to be able to write 100 posts on my blog as part of the #100DaysToOffload Challenge."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
slug: 100-days-to-offload
|
|
||||||
---
|
|
||||||
|
|
||||||
I know that I've been a bit crap at updating my blog properly and consistently over the past few years. One of my new year's resolutions _this year_ is to get into the habit of writing more, and so [#100DaysToOffload](https://100daystooffload.com) seems a good opportunity to challenge myself to make sure I do.
|
|
||||||
|
|
||||||
The guidelines for and the ideas behind the challenge are [on the challenge's website](https://100daystooffload.com). There aren't any rules really, but the essential message is to "Just. Write.". So, I'll do my best before the end of 2021, and given that I've already published two posts this year I'll count this _number 3_.
|
|
||||||
|
|
||||||
I will try to keep things tech-related as much as possible. There's only so much there that I can write about though, so I will probably also include bits from my life, books I read, things I've watched, etc.
|
|
||||||
|
|
||||||
If you want to follow along, you can [subscribe to my RSS feed](/rss.xml). If you need an RSS reader, I can definitely recommend [Reeder 5](https://www.reederapp.com), which is available for [macOS](https://itunes.apple.com/app/id1529448980) and [iOS](https://apps.apple.com/app/id1529445840) - it's fab. If you're trying the challenge too, then [let me know](https://fosstodon.org/@wilw) so I can check out your posts!
|
|
@ -1,33 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-01-30T15:31:00Z"
|
|
||||||
title: "Out with the Old: Moving to Gitea"
|
|
||||||
description: "Why and how I moved from using GitHub to a self-hosted Gitea service."
|
|
||||||
tags: [selfhosted, 100daystooffload, selfhost, gitea, github, analysis, technology, opinion]
|
|
||||||
slug: moving-to-gitea
|
|
||||||
---
|
|
||||||
|
|
||||||
If you've visited my geminispace ([gemini://wilw.capsule.town](gemini://wilw.capsule.town)) you'll have noticed that I've recently been on a mission to decentralise the every-day tools and services I use, and will understand the reasons why. This post will likely become part of a series of posts in which I talk about taking control and responsibility for my own data.
|
|
||||||
|
|
||||||
One of the changes I've made more recently is to move many of my own personal projects (including the [source for this site](https://git.wilw.dev/wilw/wilw.dev)) over to a self-hosted [Gitea](https://gitea.com) service. I chose Gitea personally, but there are many other self-hosted solutions available ([see this post for examples and comparisons](https://www.paritybit.ca/blog/choosing-a-self-hosted-git-service)).
|
|
||||||
|
|
||||||
### The "problem" with GitHub
|
|
||||||
|
|
||||||
I've been a [GitHub member](https://github.com/willwebberley) for as long as I can remember, and will continue to be so and actively use it in my more professional work and when contributing to other projects. However, I don't think I'm alone in that although I try and develop things in the public and keep many home projects open-source, I **usually** don't do it with the _intention_ of receiving contributions from others. The discoverability on GitHub is great (though some may argue that its size means that things can get [a bit "diluted"](https://slashdev.space/posts/2021-01-23-signal-to-noise)), but many of the projects I develop are for my own use - and while anyone is free to take the code and use it as they want, the powerful tools offered by GitHub (and other centralised services) just never get used for these types of projects.
|
|
||||||
|
|
||||||
The other thing is that GitHub seems to have gradually become the LinkedIn of the software world, and many people use it as the basis of their CV or portfolio. This is great as it allows other people and potential employers to get an idea of the kinds of things a developer works on, coding style, and so on, but there's always a certain feel of _pressure_ (or sometimes subconscious competitiveness) that people on any socially-focused platforms might get.
|
|
||||||
|
|
||||||
When Twitter introduced their Fleets feature they mentioned that one of the motivators behind the project is that they understand that some people [get a fear of posting tweets](https://blog.twitter.com/en_us/topics/product/2020/introducing-fleets-new-way-to-join-the-conversation.html) when things feel so public. I've seen the same thing with GitHub in that people feel put-off contributing or publishing their own work in public repositories "in case someone sees" - is this a barrier to entry for more introverted developers? Inversely, re-engagement mechanisms - like the contributions graph on each user's profile - may make developers just publish for the sake of it.
|
|
||||||
|
|
||||||
None of these things are necessarily problems or wrong (private repos are always an option, for example), but these days it just feels more appropriate to be responsible for your own data as much as possible - especially when not making the most of what alternatives can provide you with, and it's always good to use and encourage alternative options so that one service doesn't become the expected norm.
|
|
||||||
|
|
||||||
### My experience so far
|
|
||||||
|
|
||||||
Since migrating many projects over to the smaller "world" that is my own git server, I get the feel that things are slower ([in a good way](https://jackcheng.com/essays/the-slow-web)) and I have been spending more time curating projects and working on the things I actually want to work on (though many are still private "for now"!).
|
|
||||||
|
|
||||||
If you're interested in trying your own self-hosted Gitea server, then it's pretty straightforward if you have a VPS (I just used the official Docker images, for which there are instructions [in the documentation](https://docs.gitea.io/en-us/install-with-docker)).
|
|
||||||
|
|
||||||
To move existing repositories over it's as simple as changing the `remote` (or adding a new one) in your local git configuration for the project and then re-pushing. Gitea also includes a migration service to automatically pull repositories through, and can also be set-up to mirror other remote repos.
|
|
||||||
|
|
||||||
In terms of performance, I've found it quick to use and navigate (certainly faster than GitHub's web interface) on a $10 VPS from Linode that I had anyway and on which I host many other services too.
|
|
||||||
|
|
||||||
It's definitely worth a try if this is something you're interested in. [Let me know](https://fosstodon.org/@wilw) how you get on.
|
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-01-31T18:15:00Z"
|
|
||||||
title: "Dirty Little Secrets by Jo Spain"
|
|
||||||
description: "A short review of the murder mystery book Dirty Little Secrets by Jo Spain."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: dirty-little-secrets
|
|
||||||
---
|
|
||||||
|
|
||||||
Recently I finished reading [Dirty Little Secrets](https://www.goodreads.com/book/show/38120306-dirty-little-secrets). This is the first book I have read by [Jo Spain](https://www.goodreads.com/author/show/14190033.Jo_Spain) and the first time I have known of the author.
|
|
||||||
|
|
||||||
![Dirty Little Secrets cover](/media/blog/dirtylittlesecrets.jpg)
|
|
||||||
|
|
||||||
The book first appears as though it's a typical murder mystery set in a relatively wealthy gated community in Ireland - however the intricacies of the characters and narrative quickly made it hard to put down. The story begins with the discovery of the long-dead body of the woman who lives at number 4 and continues with the involvement of the detectives as they investigate the strange incident.
|
|
||||||
|
|
||||||
The narrative primarily focuses on and is told from the perspectives of the neighbours and the police. It becomes clear that everyone - including the detectives - has hidden backgrounds and the story cleverly intertwines past and present timelines (along with later repeated scenes told from different viewpoints) such that open-ended questions and arcs are often eventually resolved.
|
|
||||||
|
|
||||||
I really enjoyed this book, which I listened to as an audiobook well narrated by Michele Moran. It helps reinforce the reality that everyone has obscured backgrounds or secret parts to them, which can be prematurely forced into the open by external events.
|
|
||||||
|
|
||||||
Interestingly, another recent book I read - [The Guest List by Lucy Foley](https://www.goodreads.com/book/show/51933429-the-guest-list) - is a similar murder mystery also set in Ireland (Goodreads' content recommender systems clearly working at their best). Although on paper it is similar (in terms of its location and multiple character-based perspectives) and it is similarly well-reviewed by others, I personally didn't really enjoy it. _The Guest List_ is certainly more of a suspenseful "thriller" in the traditional sense (largely given its setting) and the conclusion is probably more shocking, so I am not surprised it received good reviews. However, I just found the story to be a little dull and the characters a bit uninteresting and un-relatable. Each to their own, but I felt Jo Spain's storytelling and character development to be far more compelling.
|
|
@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-01T21:06:00Z"
|
|
||||||
title: "Why not SQLite?"
|
|
||||||
description: "An open-ended post about why (or why not) use SQLite in your projects rather than a fully-fledged DBMS server."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
slug: why-not-sqlite
|
|
||||||
---
|
|
||||||
|
|
||||||
If you need a database for your next project, why not first consider if [SQLite](https://sqlite.org) might be a good option? And I don't mean just for getting an MVP off the ground or for small personal systems; I mean for "real" production workloads.
|
|
||||||
|
|
||||||
![Why not Sqlite?](/media/blog/sqlite.jpg)
|
|
||||||
|
|
||||||
Many people will be quick to jump on this with chimes of "it's not designed for production", but I think it depends on what is actually _meant_ by "production"? Sure, it's not the right choice for every scenario - it wouldn't work well in distributed workloads or for services expected to receive a very high volume of traffic - but it has been used successfully in many real-world cases.
|
|
||||||
|
|
||||||
What made me feel the need to write this article was seeing this sentence in the [README of the Synapse Docker repo](https://hub.docker.com/r/matrixdotorg/synapse/):
|
|
||||||
|
|
||||||
> By default it uses a sqlite database; for production use you should connect it to a separate postgres database. - [matrixdotorg/synapse](https://hub.docker.com/r/matrixdotorg/synapse/)
|
|
||||||
|
|
||||||
Don't get me wrong. I totally get its meaning, but at the same time do personal Matrix servers or [home Nextcloud servers](https://help.nextcloud.com/t/nextcloud-and-sqlite/34304) not count as "production"?
|
|
||||||
|
|
||||||
[Pieter Levels](https://levels.io) famously used SQLite to help drive revenues from some of his products to [well over six-digit dollar values](https://www.nocsdegree.com/pieter-levels-learn-coding), and SQLite's [own 'appropriate uses' list](https://www.sqlite.org/whentouse.html) explains where it can be useful:
|
|
||||||
|
|
||||||
> SQLite works great as the database engine for most low to medium traffic websites (which is to say, most websites) - [sqlite.org](https://www.sqlite.org/whentouse.html)
|
|
||||||
|
|
||||||
Even if your site or service does eventually outgrow SQLite (which will be a nice problem to have), your application code will still be using SQL and so it should be relatively easy to migrate to something like [PostgreSQL](https://www.postgresql.org).
|
|
||||||
|
|
||||||
As [Paul Graham said](http://paulgraham.com/ds.html), "do things that don't scale".
|
|
||||||
|
|
||||||
Of course, it is backed by disk and so is subject to the usual I/O constraints applicable to any file, but nearly all VPS providers offer SSD-backed instances these days and SQLite [claims to be faster than filesystem I/O](https://sqlite.org/fasterthanfs.html) anyway.
|
|
||||||
|
|
||||||
It's worth remembering that there can be huge overheads and costs in setting up "production-ready" database servers. You'll need to think about provisioning the instance itself, installation of dependencies, certificates, the usual networking hardening (firewalls, ports, etc.) - and then keeping all of this up-to-date too. Even when using managed database services there are still user roles, authentication and rotating credentials to worry about, along with securely provisioning your applications with the connection strings.
|
|
||||||
|
|
||||||
Having all of these things to worry about carries the additional risk of encouraging people to become lazy or to not have the time needed to make sure everything is done properly; an easy way to accidentally introduce security issues. Plus, if you have multiple environments (e.g. for staging or testing) then these factors, and the associated costs, amplify.
|
|
||||||
|
|
||||||
There is also some interesting discussion on the topic in this [Hacker News thread](https://news.ycombinator.com/item?id=23281994) from last year.
|
|
||||||
|
|
||||||
I just think it's definitely worth a go before jumping straight into alternative heavier options. It's free, has a [smaller footprint](https://sqlite.org/footprint.html), has easily accessible bindings for many languages, and you can get started in minutes - [all you need is a file](https://sqlite.org/onefile.html).
|
|
@ -1,30 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-02T20:31:00Z"
|
|
||||||
title: "Blogging for Devs"
|
|
||||||
description: "'Blogging for Devs' is an excellent course by Monica Lent for gaining confidence in writing about technology, growing your audience, and blog strategy."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
---
|
|
||||||
|
|
||||||
A few months ago I discovered [Blogging for Devs](https://bloggingfordevs.com) - I think through [Product Hunt](https://www.producthunt.com/posts/blogging-for-devs) when it made it to #1 Product of the Day back in August last year.
|
|
||||||
|
|
||||||
At the time blogging was something I had been thinking about quite a lot. I actively followed several other blogs - both from people I know and from others in the tech community - and it was clear that, in addition to producing content that was interesting to read by others, writing was something these bloggers actually enjoyed and found valuable too for their own learning and engagement with the community.
|
|
||||||
|
|
||||||
I have also always enjoyed writing (you have to if you're ever involved in research!). I was still posting things occasionally, and had been doing so for several years, but blogging had just never really got round to forming any part of my normal routine. It was certainly something I wanted to do more of - to write, engage more with likeminded people, and for all the other personal and professional benefits associated with consistent and frequent writing - and so this was clearly a habit I needed to learn to form.
|
|
||||||
|
|
||||||
![Blogging for Devs website](/media/blog/bloggingfordevs.png)
|
|
||||||
|
|
||||||
Blogging for Devs is a course and newsletter created by [Monica Lent](https://monicalent.com) and, with all of this running through my head, I signed-up almost straight away.
|
|
||||||
|
|
||||||
I don't want to give away too much about Monica's course or content (it's free to [sign up yourself!](https://bloggingfordevs.com)), but one thing I found really valuable was actually something that happened right at the start of the course. After I signed-up I received an automated email asking _why_ I had chosen to sign-up and what I wanted to learn. Of course, I know this is largely to help Monica shape her course and to get an understanding of people's needs, but I actually found it a super-helpful self-reflection.
|
|
||||||
|
|
||||||
Why _didn't_ I blog more? What was blocking me, even though it was something I actively wanted to do? After a while of thinking it boiled down to one main thing, which was my **confidence** and, in particular a fear of what people would think if they read it (especially if they knew me!) and also a worry of writing about things no-one is actually interested in ("why would anyone want to read this?"). I summarised this and wrote it back to Monica's email, and she got back to me with a nice personal reply not long after.
|
|
||||||
|
|
||||||
The course covers lots of topics - from SEO and branding through to actual blog content. However, my issue was still very much the whole confidence thing. One thing that became clear to me during the course is that the most important step in getting over that barrier, and then forming a habit - whether it's getting up early, doing more exercise, or writing blog posts - is just to **start doing it**.
|
|
||||||
|
|
||||||
And I don't mean tomorrow or next week, I mean **today**. Just pick something to write about. If you're just getting started it can be a quick post introducing yourself ([WriteFreely](https://writefreely.org) is a great platform if you need one). If you've already got something going and want to write more (like me) then write a short post about something you've learned today - tech or not. The important thing is just to start doing it.
|
|
||||||
|
|
||||||
Of course, not everything you post will be enjoyed by everyone, but that's OK. It's not always solely about your audience; you're doing it for yourself too, remember.
|
|
||||||
|
|
||||||
And also remember to sign-up to [Blogging for Devs](https://bloggingfordevs.com) today too. It's a fantastic course. If you look back at my [writing history](/blog) you'll notice the difference it's had on me. I've blogged much more consistently and effectively since taking the course, and I'm still working through some of the content even today.
|
|
||||||
|
|
||||||
Even if you're already a seasoned blogger I'm sure you'll pick up some extra tips and helpful insights, and the Blogging for Devs website also has a great [Trends section](https://bloggingfordevs.com/trends) to help you discover new blogs to follow.
|
|
@ -1,65 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-03T22:16:00Z"
|
|
||||||
title: "RSS: The Rise and Fall... and Rise Again"
|
|
||||||
description: "An opinion piece on RSS, its popularity over the last couple of decades, and how it can make a resurgence again."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
slug: rss-rise-fall-rise
|
|
||||||
---
|
|
||||||
|
|
||||||
Many people would consider RSS - Really Simple Syndication - to be a relic of the past. However I think it has been making a comeback.
|
|
||||||
|
|
||||||
RSS is a mechanism by which people can automatically receive updates from individual websites, similar to how you might follow another user on a social networking service. Software known as RSS _readers_ can be used to subscribe to RSS _feeds_ in order to receive these updates. As new content (e.g. a blog post) is published to an RSS-enabled website, its feed is updated and your RSS reader will show the new post the next time it refreshes. Many RSS readers have an interface similar to an email client, with read/unread states, folders, favourites, and more.
|
|
||||||
|
|
||||||
## The rise
|
|
||||||
|
|
||||||
RSS was [first released](https://en.wikipedia.org/wiki/RSS) in early 1999, and it steadily gained popularity amongst content producers and consumers, with adopters from media outlets and software implementations making their way into early Internet Explorer and Firefox versions, amongst others. These were the days before the "real" [Web 2.0](https://en.wikipedia.org/wiki/Web_2.0) hit, and in which websites were very much more silos of information. Tools like RSS were powerful then because they enabled the easy _aggregation_ of information from multiple sources.
|
|
||||||
|
|
||||||
Not too long after this (Web 2.0 'began' in the mid 2000's), and during the years ever since, mainstream social networks became ubiquitous. Many people flock(ed) to these as a way to share and subscribe (by following others) to receive updates in real time, several times a day and from lots of different people and organisations. These services enabled features far beyond aggregation by allowing easy sharing, rating (e.g. likes) and commentating such that today such services have become the primary means of information and news sharing and reception for many people.
|
|
||||||
|
|
||||||
## The fall(?)
|
|
||||||
|
|
||||||
At that time RSS was still very much "a thing" for many people (though the [discontinuation of the hugely popular Google Reader in 2013](https://en.wikipedia.org/wiki/Google_Reader#Discontinuation) was a bit of a bummer to these communities). However new people now joining the web scene would be far more likely to instead engage with these extremely well-funded, well-marketed, and _centralised_ social platforms - [perfectly engineered to be addictive](https://www.thesocialdilemma.com), entirely driven and propagated by [FOMO](https://en.wikipedia.org/wiki/Fear_of_missing_out), and focused on content-sharing (even if the content is often [misinformation](https://www.theguardian.com/technology/2021/jan/30/facebook-letting-fake-news-spreaders-profit-investigators-claim)) - where _you_ are the product, rather than spend the time researching and subscribing to individual RSS feeds.
|
|
||||||
|
|
||||||
To some commentators in this space the concept behind all of these social platforms is known as the _[fast web](https://jackcheng.com/essays/the-slow-web/#the-fast-web)_ - a web that tells you when and what information to consume rather than letting you make that decision for yourself. Facebook, Twitter, Instagram, and others all started as just a _chronological_ timeline of interesting content from friends and family. On all of these services today the "algorithm" determines what (and who) goes in your timeline, and it constantly learns what to feed you - and when - in order to get those few extra minutes from you each day. This is literally its business model.
|
|
||||||
|
|
||||||
What used to be an innocent bookmarking tool, Twitter's "favouriting" mechanism is now essentially a game of __retweet roulette__ in which the algorithm will every now and again choose to include your "bookmarks" (not just retweets) on the feeds of people who follow you. If that's not anxiety-inducing or user-hostile then I don't know what is!
|
|
||||||
|
|
||||||
Of course this is something Facebook has done for a while too, except perhaps in a more sinister way - such as implying [a user has liked something when they haven't at all](https://www.baekdal.com/thoughts/facebook-graph-search-privacy-woes).
|
|
||||||
|
|
||||||
Other social networking tools can be more user-friendly. For example, the open-source [Mastodon](https://en.wikipedia.org/wiki/Mastodon_(software)) software powers distributed social networks that aren't fuelled by addiction and instead give you more control over what you receive and where your posts go. However these tools still have some way to go to becoming anywhere near mainstream.
|
|
||||||
|
|
||||||
I want to caveat some of the above: I obviously don't think any of this is the fault of the individual. These social platforms are fantastically easy places to set-up a web presence. Creating an Instagram, Facebook, or Tik-Tok account for you (or your business) takes literal seconds. Within a minute you can have your profile setup and be following a dozen people, and already getting engagement and "reactions" from others (remember those "Your friend, X, is now on Instagram!" type notifications?).
|
|
||||||
|
|
||||||
With all of this power and efficiency at the fingertips it's no wonder that people don't create their own personal websites anymore, or feel the need to reach out to actively keep-up with other such sites. What's the point in re-inventing the wheel when I can easily create a Facebook page for myself that includes an inbuilt blog "feed", a space for links, photos, and more? And it's "free"! The barrier to creating a self-owned personal space on the internet for yourself is considered too high for most people, and is probably still seen as "geeky" even if it does come with all the benefits of privacy and control.
|
|
||||||
|
|
||||||
And I'm not saying that self-owned spaces, RSS, and that whole ecosystem are related, or the opposite to, mainstream social media; more that it is a useful way to compare and contrast different ways of accessing and disseminating information and the level of control one has over this.
|
|
||||||
|
|
||||||
This probably feels like I'm going way off-piste, and I sort of have, but my key meaning here is that for several years the concept of RSS has evaporated from popular knowledge because people haven't _needed_ it - either as a tool for receiving _or_ disseminating information. Ask your non-tech friends and family if they've heard of RSS (and know what it is for a bonus point) - I bet the positive response rate will be low in most cases, especially in younger respondents.
|
|
||||||
|
|
||||||
Also, I don't think this is solely the fault of the social giants. Online media outlets - which would have needed to rely on RSS for years before online social media became more mainstream - now often completely ignore it or treat it as a second-class citizen.
|
|
||||||
|
|
||||||
The [BBC News website](https://www.bbc.co.uk/news) happily displays large friendly icons for Facebook, Twitter, and the like, but no mention of RSS (try `ctrl-F`). In fact, you'll probably need to search the web for "bbc rss" in order to find the RSS feeds that are [listed on a page that hasn't been updated for over a decade](https://www.bbc.co.uk/news/10628494) and which still lists IE7 and the long-discontinued Google Reader as sensible options (though ironically I suppose this does indicate the stability and robustness of the RSS system).
|
|
||||||
|
|
||||||
## The rise again
|
|
||||||
|
|
||||||
Anyway, all that sounds a bit doom and gloom but I definitely think we are starting to see a shift in people's attitude towards and - importantly, trust in - these big tech companies. Facebook's recent attitude towards information collection (and subsequent sharing) has [hit mainstream headlines](https://www.independent.co.uk/life-style/gadgets-and-tech/facebook-update-apple-privacy-ads-b1795916.html) and everyone must have seen [Whatsapp's popup about data sharing](https://www.techradar.com/news/whatsapps-new-privacy-policy-requires-you-to-share-data-with-facebook). Too much uncertainty undermines the trust in these platforms, and people have understandably sought out other options. A few weeks ago Telegram reported an addition of [25 million new users within 72 hours](https://www.androidpolice.com/2021/01/12/telegram-adds-25-million-new-users-in-just-72-hours-as-rival-apps-falter) as a result of these policy "changes".
|
|
||||||
|
|
||||||
My parents aren't really tech-aware at all but even they were telling me last week on a video call about this "new app Signal" they had downloaded and begun to use with their friends - without any of my input.
|
|
||||||
|
|
||||||
I'm not sure what it is, but people seem to _care_ more about their data these days. Whether that's because of GDPR, the fact that coronavirus means people aren't endlessly scrolling through social feeds on their daily commutes anymore, or something else or a mixture of everything. And that extends to being more picky about the information they receive too.
|
|
||||||
|
|
||||||
Either way, I've noticed more and more [posts like this](https://atthis.link/blog/2021/rss.html) (and the subsequent [reactions and discussions](https://news.ycombinator.com/item?id=26014344)) recently, and the [#100DaysToOffload](https://100daystooffload.com) movement has brought about a surge in people - myself included, really - creating their own longer-form content, for which RSS is a perfect distribution mechanism.
|
|
||||||
|
|
||||||
I think we're on a bit of a brink representing a general - but real - change in attitude from people towards data, and the time that they choose to give to now lesser-trusted platforms. It is our responsibility to help educate about the alternative options so that those around us can make their own decisions. Whilst I am relatively new to RSS in the grand scheme of things (having only really started properly engaging with it about a year ago), it already makes me feel more in control of what I view, and when.
|
|
||||||
|
|
||||||
Whilst this concept doesn't need to be limited to RSS, it's a great starting point as it's easy to understand. It "feels" friendly, it helps power connections to the decentralised and [small web](https://ar.al/2020/08/07/what-is-the-small-web/).
|
|
||||||
|
|
||||||
It, as a concept, has no business model. Though of course you can pay for the software you use, and websites can make money through ads, but at least you have a _choice_ regarding who you subscribe to and the software you use to do it through (and [there are lots of choices](https://en.wikipedia.org/wiki/Comparison_of_feed_aggregators)). You aren't tied into anything and it respects your privacy - you don't need to "sign-up", provide your details, and sites don't know that _you_ personally have subscribed.
|
|
||||||
|
|
||||||
RSS may be age-old, but it is an excellent way to still get the information you need as you begin to use mainstream social media less, and - although it doesn't need to be slow in itself - it is a fantastic tool to combine with the growing and user-respecting world of the [slow web](https://jackcheng.com/essays/the-slow-web#timely-vs-real-time), in which timeliness (where you're in control) is far more important than "real-time".
|
|
||||||
|
|
||||||
--
|
|
||||||
|
|
||||||
### Edit
|
|
||||||
|
|
||||||
I've received some replies to this post that talk about the lack of mentions of the [Atom standard](https://en.wikipedia.org/wiki/Atom_(Web_standard)) and podcasts. RSS certainly is (and has been) a fantastic way to subscribe to podcasts. Its flexibility and ease of use has been a great tool for both content creators and consumers, and has helped to build the ecosystem of podcast apps and services we see today. And of course, there are other very useful distribution mechanisms and standards available for distributing information, such as Atom. This post was focused more on contrasting this family of systems with what many people may consider "mainstream" services, and how the wide adoption of the latter has perhaps had an effect on the former.
|
|
@ -1,82 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-05T23:46:00Z"
|
|
||||||
title: "React State Management with Zustand"
|
|
||||||
description: "How to manage your JavaScript React app's global state using the zustand library."
|
|
||||||
tags: [100daystooffload, technology, javascript, react]
|
|
||||||
slug: react-state-zustand
|
|
||||||
---
|
|
||||||
|
|
||||||
## React state
|
|
||||||
|
|
||||||
React state management is what gives the library its reactiveness. It's what makes it so easy to build performant data-driven applications that dynamically update based on the underlying data. In this example the app would automatically update the calculation result as the user types in the input boxes:
|
|
||||||
|
|
||||||
```jsx
|
|
||||||
import React, { useState } from 'react';
|
|
||||||
|
|
||||||
function MultiplicationCalculator() {
|
|
||||||
const [number1, setNumber1] = useState(0);
|
|
||||||
const [number2, setNumber2] = useState(0);
|
|
||||||
return ( <>
|
|
||||||
<input value={number1} onChange={e => setNumber1(parseInt(e.target.value))} />
|
|
||||||
<input value={number2} onChange={e => setNumber2(parseInt(e.target.value))} />
|
|
||||||
<p>The result is {number1 * number2}.</p>
|
|
||||||
</> );
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
![The resultant React app, showing two text inputs and a result line](/media/blog/zustand1.png)
|
|
||||||
|
|
||||||
The entire function will re-run on each state change (the `setNumber1` and `setNumber2` functions) in order to reactively update the result text. The multiplication itself could be calculated in a `useEffect` but it is simpler to look at it as shown.
|
|
||||||
|
|
||||||
This is totally fine for many apps, however this quickly becomes unmanageable when you need to share state (e.g. `number1`) between this component and another component - and ensure that a state change in the former can be reflected in the latter - whether it's an ancestor, descendant, or a more distant component. Of course, you can pass the state variables (and the associated `setState` functions) from a parent down as `props` to child components, but as soon as you're doing this more than a handful of times or in cases where state needs to be shared across distant components this quickly becomes hard to maintain or understand.
|
|
||||||
|
|
||||||
An example of shared state might be to store the details about the currently logged-in user in an app. A navigation bar component would need to know about the user state to show a link to the correct profile page, and another component may need access to the same state in order to allow the user to change their name.
|
|
||||||
|
|
||||||
## Context and Redux
|
|
||||||
|
|
||||||
This is by no means a new problem. Many of these issues are solved using React's [Context API](https://reactjs.org/docs/context.html) and there are also libraries like Redux that are useful in perhaps more complex scenarios - it's much more opinionated and involves a fair bit of extra code that may be overkill in many apps. Adding just a small piece of state (e.g. a new text input), and the ability to alter it, to Redux involves updating reducers, creating an action, dispatchers, and wiring things through to your components using `connect`, `mapStateToProps`, and `mapDispatchToProps`. Plus you'll need the relevant provider higher up.
|
|
||||||
|
|
||||||
Redux is certainly a fantastic library, however, and I use it in many apps. [This post](https://changelog.com/posts/when-and-when-not-to-reach-for-redux) is useful and discusses the cases in which you may (or may not) want to use Redux.
|
|
||||||
|
|
||||||
## Zustand
|
|
||||||
|
|
||||||
In this post I want to talk about another option that is perhaps quicker and easier to use, especially for those newer to React (though it's also great for more seasoned React developers) - [zustand](https://github.com/pmndrs/zustand). Not only is this the German word for "state", it's also a nice and succinct library for state management for React.
|
|
||||||
|
|
||||||
The zustand library is pretty concise, so you shouldn't need to add too much extra code. To get started just add it as a dependency to your project (e.g. `yarn add zustand`). Now let's rewrite the earlier multiplication example but using zustand.
|
|
||||||
|
|
||||||
First, define a _store_ for your app. This will contain all of the values you want to keep in your global state, as well as the functions that allow those values to change (_mutators_). In our store, we'll extract out the state for `number1` and `number2` we used in our component from earlier, and the appropriate update functions (e.g. `setNumber1`), into the store:
|
|
||||||
|
|
||||||
```jsx
|
|
||||||
import React from 'react';
|
|
||||||
import create from 'zustand';
|
|
||||||
|
|
||||||
const useStore = create((set) => ({
|
|
||||||
number1: 0,
|
|
||||||
number2: 0,
|
|
||||||
setNumber1: (x) => set(() => ({ number1: x })),
|
|
||||||
setNumber2: (x) => set(() => ({ number2: x })),
|
|
||||||
}));
|
|
||||||
```
|
|
||||||
|
|
||||||
Now - in the same file - we can go ahead and rewrite our component such that it now uses this store instead of its own local state:
|
|
||||||
|
|
||||||
```jsx
|
|
||||||
function MultiplicationCalculator() {
|
|
||||||
const { number1, number2, setNumber1, setNumber2 } = useStore();
|
|
||||||
return ( <>
|
|
||||||
<input value={number1} onChange={e => setNumber1(parseInt(e.target.value))} />
|
|
||||||
<input value={number2} onChange={e => setNumber2(parseInt(e.target.value))} />
|
|
||||||
<p>The result is {number1 * number2}.</p>
|
|
||||||
</> );
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
That's it - we now have a React app that uses zustand. As before, the component function runs each time the store's state changes, and zustand ensures things are kept up-to-date.
|
|
||||||
|
|
||||||
In the example above the two blocks of code are in the same file. However, the power of zustand becomes particularly useful when the store is shared amongst several components across different parts of your app to provide "global state".
|
|
||||||
|
|
||||||
For example, the `useStore` variable could be declared and exported from a file named `store.js` somewhere in your app's file structure. Then, when a component needs to access its variables or mutator functions it just needs to import the store - for example, `import useStore from 'path/to/store'` - and then use [object destructuring](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Destructuring_assignment) (as on line 11 above) to pull out the needed variables and functions.
|
|
||||||
|
|
||||||
It's worth checking out [the documentation](https://github.com/pmndrs/zustand) since zustand is super flexible and can be used in ways that help improve performance, such as taking advantage of memoizing and state slicing. It also makes what can be tricky in other such libraries - e.g. asynchronous state updates - trivial.
|
|
||||||
|
|
||||||
If you've already got an established app using another state management system it may not be worth migrating everything over. But give zustand a go in your next project if you're looking for straightforward, yet powerful, state management.
|
|
@ -1,43 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-06T19:34:00Z"
|
|
||||||
title: "Add icing to your websites using pattern.css"
|
|
||||||
description: "How to use the great pattern.css library to easily add subtle patterns and backgrounds to your websites and web apps."
|
|
||||||
tags: [100daystooffload, technology, css]
|
|
||||||
slug: pattern-css
|
|
||||||
---
|
|
||||||
|
|
||||||
Shapes and patterns can be leveraged in user interfaces to guide your users, draw attention to content, lend weight or emphasis, or just for aesthetics and decoration.
|
|
||||||
|
|
||||||
Layout and styling on the web is typically handled using CSS, however mastering CSS to the level where you can confidently take advantage of more advanced features is definitely not easy. I've been developing for the web almost full-time for a decade and I'm still pretty crap when it comes to doing complex stuff with CSS.
|
|
||||||
|
|
||||||
That said, some people have done some [mindblowing things using CSS and just a single `div` element](https://a.singlediv.com).
|
|
||||||
|
|
||||||
The [Subtle Patterns](https://www.toptal.com/designers/subtlepatterns) website has been around for years - it's a great resource for discovering and accessing nice textures and backgrounds for your creations. There are also some nice libraries in CSS that let you describe patterns programmatically, and this comes with the added performance advantages that CSS provides (as browsers are pretty performant when it comes to CSS).
|
|
||||||
|
|
||||||
[pattern.css](https://bansal.io/pattern-css) (created by [bansal-io](https://github.com/bansal-io)) is a great little CSS-only library for adding simple, but effective, patterns to your websites. For example, backgrounds for elements, false "block" shadows, or even within the text itself. All that's needed are a few extra classes on your elements and the small (less than 1Kb when gzipped) library will do the rest.
|
|
||||||
|
|
||||||
To get started, you can add the library to your project using your normal JavaScript package manager (e.g. `yarn add pattern.css`). Then either include the CSS file in your HTML or, if you're using React or another framework/builder that allows you to import CSS directly, you can:
|
|
||||||
|
|
||||||
```jsx
|
|
||||||
import 'pattern.css/dist/pattern.css'
|
|
||||||
```
|
|
||||||
|
|
||||||
Once that's done it's just a matter of [adding classes](https://bansal.io/pattern-css#usage) to your markup. All the `pattern.css` classes start with `pattern`, followed by the type of pattern (e.g. `-diagonal-stripes`), followed by the "size" of the pattern (e.g. `-sm`).
|
|
||||||
|
|
||||||
For example, to build a `div` with a chunky zig-zag patterned background you just need to use:
|
|
||||||
|
|
||||||
```html
|
|
||||||
<div class="pattern-zigzag-lg">
|
|
||||||
...
|
|
||||||
</div>
|
|
||||||
```
|
|
||||||
|
|
||||||
To change the colour of the pattern just set a `color` style on the element. If the element also has a `backgroundColor` then this will display through the transparent bits:
|
|
||||||
|
|
||||||
```html
|
|
||||||
<div class="pattern-diagonal-stripes-md" style="color: red; backgroundColor: yellow">
|
|
||||||
...
|
|
||||||
</div>
|
|
||||||
```
|
|
||||||
|
|
||||||
Have a read through [the documentation](https://bansal.io/pattern-css#hero) for examples and further pattern types. It's quick to get the hang of and far more effective to use if - like me - you find some of the complexities of CSS hard to get your head around!
|
|
@ -1,53 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-07T19:31:00Z"
|
|
||||||
title: "Using Monica to Help Manage your Personal Relationships"
|
|
||||||
description: "Why you need a 'personal relationship manager' and how to set-up Monica on your own server."
|
|
||||||
tags: [100daystooffload, technology, life, selfhost]
|
|
||||||
slug: monica-personal-crm
|
|
||||||
---
|
|
||||||
|
|
||||||
Many people no longer feel comfortable using Facebook. Whether you were never a member to begin with or you've had an account but chosen to remove yourself from the service, or you've simply tried to start using it less - either way, it's no surprise given the way that they, across their family of products (including Instagram and WhatsApp), operate in terms of your own data and time.
|
|
||||||
|
|
||||||
This is a huge subject on its own and it's really up for everyone to make their own minds up when it comes to their own stance. It's been widely discussed pretty much everywhere, and there are [loads of resources available on this handy website](https://www.quitfacebook.org) if you're interested in understanding more about what goes on behind the scenes on these platforms.
|
|
||||||
|
|
||||||
## Staying in the loop
|
|
||||||
|
|
||||||
Anyway this isn't another post about Facebook, but one of the things that _is_ useful about that particular platform is its birthday reminder system in which you automatically receive an email from Facebook if it happens to be one of your friend's birthdays that day. In itself, this is of course simply a mechanism to try and get you to re-engage with the platform - such as to send your friend a direct message on Messenger or to post something on their timeline.
|
|
||||||
|
|
||||||
However, it is nice to get messages on your birthday, and nice to imagine that someone you only speak to a couple of times a year has the headspace to _remember_ that today is your special day. Even though you both know that it's because Facebook has sent a reminder with an easy CTA.
|
|
||||||
|
|
||||||
The good news is that there are still lots of services that help you remember key events without needing to rely on Facebook. Of course you can set-up calendars (many mail providers have in-built calendar facilities that can sync to your client with CalDAV), but you may want to remember other things too - such as anniversaries, friends' pets' names, that time you helped your cousin move house, and more. Quickly all of this info ends up distributed between a number of systems and becomes hard to look-up and manage (unless you're super organised).
|
|
||||||
|
|
||||||
## Monica: the "Personal Relationship Manager"
|
|
||||||
|
|
||||||
What we need is a personal _CRM_ ("customer relationship manager"), which can do all of this for us. And thankfully such systems exist - such as [Monica](https://www.monicahq.com).
|
|
||||||
|
|
||||||
> Monica is the single best investment you can make to have better relationships. - [monicahq.com](https://www.monicahq.com/pricing).
|
|
||||||
|
|
||||||
Monica is a piece of [open-source software](https://github.com/monicahq/monica) that can handle all of this for you as a "Personal Relationship Manager" (in their words) - and much more. You can sign-up on [their website](https://app.monicahq.com/register) and pay a small ongoing subscription fee to cover the server costs. Alternatively, you can easily self-host it on your own server.
|
|
||||||
|
|
||||||
![The Monica dashboard homepage](/media/blog/monica.png "This is what my Monica homepage looks like")
|
|
||||||
|
|
||||||
I've been using it (the self-hosted option) for some time now, and [love its features](https://github.com/monicahq/monica#features). I get automatic email notifications in-time to remind me about key events, I can keep track of the birthdays of my friends' kids, remember gifts I have been given, friend life events, jobs, and more.
|
|
||||||
|
|
||||||
Although I still want to spend some further time setting it up and adding more details about the people I know, it already helps me to include richer information when I message friends and family and to remember the things I really should be remembering anyway.
|
|
||||||
|
|
||||||
Monica looks great, works fine on my phone web browser as well as my desktop browser, and also has an API that allows you to build your own workflows or to connect it to other services.
|
|
||||||
|
|
||||||
If you find yourself forgetting birthdays and important information about friends and family, or if you just want to log relationships more effectively, then I certainly recommend giving it a go.
|
|
||||||
|
|
||||||
## How to self-host Monica
|
|
||||||
|
|
||||||
I host Monica on a relatively small VPS. It's lightweight and it happily runs alongside a few other services.
|
|
||||||
|
|
||||||
I usually prefer using Docker to host things like this as it helps keep things isolated when running multiple services on the same machine. I have an Nginx container (with several virtual hosts) that proxies requests through to the appropriate services.
|
|
||||||
|
|
||||||
The Monica Team kindly maintain an official [Docker image](https://hub.docker.com/_/monica). I went for the Apache version (as I already have Nginx in-place for TLS, etc.) for which there is an example [Docker Compose](https://docs.docker.com/compose) config available on the official Monica image page. The documentation also explains how to get your first user setup.
|
|
||||||
|
|
||||||
One of the main advantages of Monica is its ability to keep you updated without you needing to login and check-up on things. It does this by sending you emails, and for this to work you'll need to add a bit of extra configuration to your Docker Compose file, as [described on this page](https://github.com/monicahq/monica/blob/master/docs/installation/mail.md). Just add the extra variables to your `environment` section in `docker-compose.yml`. The article mentions Amazon SES, however you can use your own mail provider's SMTP/IMAP server settings here (e.g. [Mailgun](https://www.mailgun.com)).
|
|
||||||
|
|
||||||
If you plan to use [Linode](https://www.linode.com) to host your Monica service (which is a great choice), you may just need to open up a quick support ticket with them so that they can make sure your account is allowed to send traffic on standard email ports (e.g. 25 and 587), which they sometimes restrict on new accounts to help fight spam.
|
|
||||||
|
|
||||||
## Contribute
|
|
||||||
|
|
||||||
If you want to contribute to this great open-source project, then there are [guides available on GitHub](https://github.com/monicahq/monica#contribute).
|
|
@ -1,88 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-10T22:11:00Z"
|
|
||||||
title: "SSH Jumping and Bastion Hosts"
|
|
||||||
description: "Working with secure network architectures with bastion hosts and SSH jumping."
|
|
||||||
tags: [100daystooffload, technology, security]
|
|
||||||
image: header-ssh-jumping-bastion-hosts.png
|
|
||||||
imageDescription: AI generated pixel art of astronauts and cats jumping over computers.
|
|
||||||
slug: ssh-jumping-bastion-hosts
|
|
||||||
---
|
|
||||||
|
|
||||||
For many small or personal services running on a VPS in the cloud, administration is often done by connecting directly to the server via SSH. Such servers should be hardened with firewalls, employ an SSHd config that denies root and password-based login, run [fail2ban](https://www.fail2ban.org), and other services and practices.
|
|
||||||
|
|
||||||
Linode has some [great getting-started guides](https://www.linode.com/docs/guides/securing-your-server) on the essentials of securing your server.
|
|
||||||
|
|
||||||
## Protecting sensitive servers
|
|
||||||
|
|
||||||
In more complex production scenarios heightened security can be achieved by isolating application (webapp, API, database, etc.) servers from external internet traffic. This is usually done by placing these "sensitive/protected" servers in a private [subnet](https://en.wikipedia.org/wiki/Subnetwork), without direct internet-facing network interfaces. This means that the server is not reachable from the outside world.
|
|
||||||
|
|
||||||
In this type of scenario, outbound traffic from the sensitive server can be routed through a [NAT gateway](https://en.wikipedia.org/wiki/Network_address_translation) and inbound traffic can be funnelled through a [load-balancer](https://en.wikipedia.org/wiki/Load_balancing_(computing)) or reverse proxy server. In both these cases the NAT gateway and load-balancer would exist in public subnets (with internet-facing network interfaces) and can reach the sensitive server through private network interfaces in order to forward requests (e.g. web traffic).
|
|
||||||
|
|
||||||
![Diagram of public and private subnets, with a NAT gateway and load balancer](/media/blog/ssh1.png)
|
|
||||||
|
|
||||||
Now the question is around how one _does_ manage the services running on the protected server, since it is no longer available to connect to. Traditionally this is done by introducing _bastion hosts_ into your network.
|
|
||||||
|
|
||||||
## Bastion hosts
|
|
||||||
|
|
||||||
[Bastion hosts](https://en.wikipedia.org/wiki/Bastion_host) - like the NAT gateway and load balancers - sit in the public subnet and so they are available to the outside world. They often accept SSH connections, from which one can "jump" through to the protected servers through the bastion's private networking interface.
|
|
||||||
|
|
||||||
![Adding a bastion host to the cloud infrastructure](/media/blog/ssh2.png)
|
|
||||||
|
|
||||||
Bastion hosts should be hardened as much as possible (with firewalls and other network rules), and should run a limited set of services - in many cases simply SSHd.
|
|
||||||
|
|
||||||
This server then enables administrators to connect through to the protected servers in order to carry out maintenance, upgrades, or other tasks.
|
|
||||||
|
|
||||||
## Connecting through a bastion host
|
|
||||||
|
|
||||||
SSH port-forwarding is a widely-used concept, in which a secure tunnel to a service running on the protected server is opened via a port on the local machine (using `ssh`'s `-L` option).
|
|
||||||
|
|
||||||
Another option is to use proxy _jumping_ (with the `-J` option):
|
|
||||||
|
|
||||||
```shell
|
|
||||||
ssh -J bastion.company.com protected.company-internal.com
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example the user connects via the bastion through to the protected server at `protected.company-internal.com`. Since you should be using key-based authentication to connect, you may also need to specify the private key path (with the `-i` option), and also tell SSH to forward your agent to the bastion (using `-A`) so it can continue the connection.
|
|
||||||
|
|
||||||
This can make things hard to remember each time. You could write the command in a script, however it's probably easier to use SSH's own local configuration. To do so, you can add the following to your local `~/.ssh/config` file:
|
|
||||||
|
|
||||||
```
|
|
||||||
Host *.company-internal.com
|
|
||||||
ProxyJump bastion.company.com
|
|
||||||
User username
|
|
||||||
IdentityFile /home/username/.ssh/identity
|
|
||||||
```
|
|
||||||
|
|
||||||
With that in place you can now simply run the following when you want to connect to the protected server:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
ssh protected.company-internal.com
|
|
||||||
```
|
|
||||||
|
|
||||||
_Note: depending on your system you may need to add the key to your local agent first, but you just need to do this once per login (`ssh-add ~/.ssh/identity`)_.
|
|
||||||
|
|
||||||
## A note on DNS
|
|
||||||
|
|
||||||
Generally I would probably avoid assigning public domain names to a bastion host, as this may invite unwanted attention and traffic (even if the host is secured). Instead you can just include the IP address directly in the `ProxyJump` line of `.ssh/config`. _I used domain names in the examples above to make the process clearer_.
|
|
||||||
|
|
||||||
Also, in the above example I refer to the `company-internal.com` domain for use within the private network. This domain should only be resolvable within members of the private network - either by using an internal DNS server or by simply modifying `/etc/hosts` on the bastion. Alternatively you can just use the private IP address for the protected server on the `Host` line of `.ssh/config`.
|
|
||||||
|
|
||||||
## Additional notes
|
|
||||||
|
|
||||||
In setups like this you may also want to consider the following:
|
|
||||||
|
|
||||||
### Private keys
|
|
||||||
|
|
||||||
Don't provision these on your bastion host. Instead use agent forwarding (as described above). You'll need to add your public keys to both the bastion and protected servers.
|
|
||||||
|
|
||||||
### Restrict source network
|
|
||||||
|
|
||||||
For extra security you can restrict SSH connections to your bastion only from trusted networks (e.g. your office network or a VPN).
|
|
||||||
|
|
||||||
Similarly, restrict protected servers such that they only accept SSH traffic from the bastion, and not from other servers on the network.
|
|
||||||
|
|
||||||
### Make use of managed services if/when possible
|
|
||||||
|
|
||||||
For extra security you can use managed services when they are available. For example, if you use AWS then you can make use of a combination of VPCs, subnets, NAT gateways, elastic load balancing, security groups, Route 53, and other services to secure your hosts and control your network. You can of course set this up on your own servers without relying on managed services.
|
|
||||||
|
|
||||||
Either way, I hope this post has helped shed light on some simple ways to improve network security for your applications and services.
|
|
@ -1,29 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-13T21:17:00Z"
|
|
||||||
title: "The Midnight Library by Matt Haig"
|
|
||||||
description: "Some thoughts on the novel 'The Midnight Library' by Matt Haig, and my personal takeaways."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: midnight-library
|
|
||||||
---
|
|
||||||
|
|
||||||
Last week I read [The Midnight Library](https://www.goodreads.com/book/show/52578297-the-midnight-library) by [Matt Haig](https://www.goodreads.com/author/show/76360.Matt_Haig). The book won the 2020 [Goodreads Choice Award for Fiction](https://www.goodreads.com/award/show/21332-goodreads-choice-award).
|
|
||||||
|
|
||||||
![The Midnight Library cover](/media/blog/midnight-library.jpg)
|
|
||||||
|
|
||||||
"Set" in Bedford, England, the story starts by introducing the main character - Nora Seed - who feels completely down. She is depressed and thinks that she has nothing further to contribute to her own life or to the lives of the few people around her.
|
|
||||||
|
|
||||||
On the day she decides she no longer wants to live, she is fired from her job, her cat dies, and other events occur which help cement her decision. However, as she dies she is transported to a place that exists between life and death: The Midnight Library.
|
|
||||||
|
|
||||||
Here she is presented with the infinite number of books that make up the lives that could have been had she made different choices in the past - whether those were big or small (such as choosing whether to have a tea or coffee) or something more obviously impactful. Either way, they can contribute to a complete change in life direction.
|
|
||||||
|
|
||||||
She has the option to begin living these different lives by considering the _regrets_ she has about the decisions she made in her root life. As she "visits" her other lives she reflects on the decisions that led her to that point, and also realises the power in the choices she makes in their ability to also drastically affect the lives of those around her.
|
|
||||||
|
|
||||||
Whilst I feel that the book was perhaps not as deep as it could have been, it is my opinion that this was an intentional design by the author as it made the experience more of a canvas in order for the reader to make their own reflections.
|
|
||||||
|
|
||||||
Some of the key thought takeaways for me were around the knowledge that whilst the decisions one person makes may benefit them, they may not be beneficial for everyone. Whilst of course it is important to consider the happiness and wellbeing of yourself as well as those affected by your decisions, one needs to live and experience the variety of life without feeling paranoid about making the decisions that you feel are the right ones.
|
|
||||||
|
|
||||||
The book made me reflect on some of my own decisions. I know the grass isn't always greener but that the choices are always there to be made if I want or need a change - it is never too late.
|
|
||||||
|
|
||||||
The premise of the story sounds like it could be depressing, however I did not find that at all. In many ways, it was the complete opposite: having an understanding of the power in your choices helps you realise that even when things feel at their worst, you are not powerless. There is always something you can do to make a change and choices to be made to gear yourself towards where you need to be.
|
|
||||||
|
|
||||||
We all have regrets in our own lives, and decisions we wish we did (or didn't) make, but these should not be dwelled upon or worried about. Instead we can consider them as the useful tools they are to help us make different or better decisions as we look forward and continue into the future.
|
|
@ -1,33 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-18T21:01:00Z"
|
|
||||||
title: "A Year Without Answering my Phone"
|
|
||||||
description: "Why I stopped answering my phone, and what my experience has been like since."
|
|
||||||
tags: [100daystooffload, life, opinion]
|
|
||||||
slug: no-phone-answering
|
|
||||||
---
|
|
||||||
|
|
||||||
This month marks a year from when I decided to (_mostly_ - see below) stop answering my phone. This was not because I wanted to be antisocial (quite the opposite), but because it's become the wrong form of communication for me.
|
|
||||||
|
|
||||||
## Why did I stop?
|
|
||||||
|
|
||||||
Like many people, I am inundated with sales-y and spammy phonecalls. I have had the same mobile phone number since 2001 (that's 20 years this year), which I am sort of proud of and would prefer to keep. However, careless (or malicious) entities over the years (and more than likely mistakes also made by my younger self) have meant that my number and name are now in the databases of many different types of agents - from insurance/legal company sales teams through to dodgy Bitcoin spam companies.
|
|
||||||
|
|
||||||
It got to the point that the signal/noise ratio ("real" phone calls vs. unwanted) probably dropped to around 5%. At first, spam calls were easier to spot (they'd call from random UK cities), but recently calls started to come in from numbers starting with "07" (which designates a mobile number in the UK) and also more and more from the [area code](https://en.wikipedia.org/wiki/List_of_dialling_codes_in_the_United_Kingdom) of the city where I live - probably in the hope of appearing more legitimate to me.
|
|
||||||
|
|
||||||
I also find talking on the phone sort of _stressful_. I'm sure I'm not alone in that the _Phone_ app is probably the least-used part of my smart"phone". For some reason, to me it just doesn't feel natural, and - with the exception of close friends and family (and even them sometimes) - I'd much rather "talk" to people via IM or live text chat.
|
|
||||||
|
|
||||||
I'm naturally pretty introverted so I get on better with channels that enable me to think and formulate comms in my own time.
|
|
||||||
|
|
||||||
Unexpected and unscheduled calls are also pretty _rude_, I think. Stephen Fry sums up essentially what I feel about this [in this short but great clip from QI](https://youtu.be/7xXSw07zrio?t=211) - that phoning someone out of the blue is really the equivalent to going up to that person and yelling at them, "speak to me now, speak to me now, speak to me now". Without caring that they might be busy, stressed, not in the right frame of mind, or any number of other states.
|
|
||||||
|
|
||||||
This is incredibly invasive to do to someone you don't even _know_.
|
|
||||||
|
|
||||||
## What was the result?
|
|
||||||
|
|
||||||
In the end I made a pact that I would no longer answer the phone unless it was a pre-arranged call or from a number that I recognised - and only then close friends and family.
|
|
||||||
|
|
||||||
I feel far more empowered and in control of my own time when I hear/see my phone ring - and I just silence it and let it ring out. The decision has already been made to purposefully miss the call and so there is no need for any anxiety that might accompany such unexpected calls.
|
|
||||||
|
|
||||||
I sometimes choose to avoid calls from numbers I _do_ recognise. These callers (usually businesses I deal with) just follow-up with an email anyway to which I can respond when I'm ready - usually within the hour. If there is an emergency they can leave a voicemail, which I will get notified about and then choose how best to respond. Friends and family either feel the same as me or know me well enough so that I don't need to miss their calls.
|
|
||||||
|
|
||||||
Either way, I haven't (knowingly) missed any events, appointments, insurance renewals, or whatever. I am going to carry on as I have been and I can certainly recommend this approach to you too if you feel the same way as me about unwanted phonecalls.
|
|
@ -1,47 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-20T21:42:00Z"
|
|
||||||
title: "The Glamour of Cyberpunk and the Road to Solarpunk"
|
|
||||||
description: "What is Solarpunk, and can we make it a reality?"
|
|
||||||
tags: [100daystooffload, opinion]
|
|
||||||
slug: solarpunk
|
|
||||||
---
|
|
||||||
|
|
||||||
A few months ago I stumbled across this article: [Beyond Cyberpunk: Towards a Solarpunk Future](https://thedorkweb.substack.com/p/towards-a-solarpunk-future). It was posted on the excellent blog _Tales from the Dork Web_, by Steve Lord, which I can certainly recommend [subscribing to](https://thedorkweb.substack.com/subscribe).
|
|
||||||
|
|
||||||
I had never heard of the term "Solarpunk" before, but I read up more about it and the more I researched the more intrigued I became. Essentially it is defined as - more or less - the _opposite_ to the [Cyberpunk](https://en.wikipedia.org/wiki/Cyberpunk) subculture, and I think we're at a bit of a fork in the road from which either future could become a reality.
|
|
||||||
|
|
||||||
## Cyberpunk
|
|
||||||
|
|
||||||
Cyberpunk (_not_ the game by CD Projekt) is a term that describes a potential future setting that is pretty dystopian: there is a large "wealth gap" between the rich and poor; people live in dark and cramped accommodations, have mostly unhealthy existences, and are governed by a small number of large private corporations. The growth of these companies, however, allows citizens of the Cyberpunk future to be equipped with some pretty nice pieces of technology for communication, leisure & media, travel, automation, and anything else.
|
|
||||||
|
|
||||||
In a nutshell, it's often described as "high-tech, low-life".
|
|
||||||
|
|
||||||
Whilst it sounds (to some?) like a gloomy outlook, I love the dark and lonely imagery, the artwork, stories and subculture that has emerged from other people who are also fascinated by this movement. You've probably seen such scenes yourself in pictures, movies, books, and games that adopt the Cyberpunk setting. The [r/ImaginaryCyberpunk subreddit](https://www.reddit.com/r/ImaginaryCyberpunk) community also often posts excellent and emotive content.
|
|
||||||
|
|
||||||
I love this image: [Oris City by Darko Mitev](https://www.artstation.com/artwork/R3QNee) and I can certainly recommend checking out more of his work and tutorials too. I love all of the atmosphere and detail.
|
|
||||||
|
|
||||||
Despite the "glamour", interesting and exciting stories and movies, politics, and other cultural pieces that emerge from it, Cyberpunk describes a gloomy future that I imagine most people do not want to actually experience.
|
|
||||||
|
|
||||||
## Solarpunk
|
|
||||||
|
|
||||||
I think we're at a bit of a weird, but pivotal, point in time right now - from (geo-)political, societal and technological perspectives - in that the Cyberpunk dystopia is becoming a little unblurred. With ever-mounting consumerism, capitalism, bad choices regarding energy production, mass surveillance (from both private companies and governments), and much more, our reality certainly feels as though it is moving towards a point where some of the elements that comprise Cyberpunk do not feel too far-fetched at all.
|
|
||||||
|
|
||||||
The present feels pivotal because whilst there are excellent efforts being made to reverse some of these positions around the world (from local recycling schemes and zero-waste manufacturers through to fights for human rights and rallies around liberal activists), these processes only become effective and impactful if they are considered and actioned by society _as a whole_. While there are still enough members that continue to wallow in seemingly-backward ideologies and refuse to become involved or make any of the needed adjustments, then change as a society cannot happen.
|
|
||||||
|
|
||||||
However, on a more positive note, if such challenges can be solved - and the right choices made now and in the near future - then a whole new potential future opens its doors: one that might be described as _Solarpunk_.
|
|
||||||
|
|
||||||
In a [Solarpunk future](https://en.wikipedia.org/wiki/Solarpunk) humanity is much more in-tune with the world around it, maintaining a focus on sustainability (in terms of energy production, consumerism, ecology, and _education_), locality (in terms of sourcing materials and food, manufacturing, and the "do it yourself" movement), and - perhaps most importantly - an _attitude_ that promotes sharing and positivity.
|
|
||||||
|
|
||||||
To me it's not "hippyish" or necessarily to do with the adoption of socialism or the outright rejection of capitalism and associated ideologies - it's more concerned with sensible _balances_ across many facets of society and its politics. Competitiveness and drives to "do better" are parts of what make us human, and can very much live hand-in-hand with the other points and aesthetics we're talking about here.
|
|
||||||
|
|
||||||
Nor is it a rejection of technology. In fact, from a technological perspective, forward-thinking efforts surrounding the [free and open-source software](https://en.wikipedia.org/wiki/Free_and_open-source_software) movement and privacy-first companies are certainly components I see that can help contribute to (and become a focus within) a more sustainable and fair world. Technology can continue to innovate, develop, and improve in either setting.
|
|
||||||
|
|
||||||
> Solarpunk isn’t about doing your bit to save the world from climate collapse. Solarpunk is about building the world you want your grandchildren to grow old in. - [Steve Lord](https://thedorkweb.substack.com/p/towards-a-solarpunk-future)
|
|
||||||
|
|
||||||
We've already seen some fantastic real-world efforts that can be considered part of this movement - from architecture and transport through to self-repair and home agriculture. I love the [bottle farm](https://containergardening.wordpress.com/2011/09/07/bottle-tower-gardening-how-to-start-willem-van-cotthem) idea included in the post I mentioned at the start of this article, and want to try this myself.
|
|
||||||
|
|
||||||
There are also the more obvious reflections, such as to fully embrace solar energy (and other renewables) as a source of power - both at an individual and industrial scale - and efforts concerned with maintaining green spaces in developing and urban areas. I think that the more mainstream and ubiquitous we can make all of these actions the more realistic a Solarpunk world can become.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
_Note: this article only scratches the surface of the Cyberpunk and Solarpunk subcultures. It is aimed to be more of a primer to introduce the concepts behind these ideas and to perhaps pique the interest of readers enough to continue their own research._
|
|
@ -1,79 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-24T13:46:00Z"
|
|
||||||
title: "Migrating from Google Photos: Nextcloud, Piwigo, Mega, and pCloud"
|
|
||||||
description: "My experiences with trying to move away from Google Photos."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
image: header-google-photos-pcloud.png
|
|
||||||
imageDescription: AI generated pixel art representing photo apps on smartphones.
|
|
||||||
slug: google-photos-pcloud
|
|
||||||
---
|
|
||||||
|
|
||||||
By now I'm sure everyone has heard the horror stories about people (seemingly-) randomly losing access to their Google accounts. Often the account closures are reported to have been accompanied with vague automated notifications from Google complaining that the account-holder violated their terms in some way, but without any specific details or an offer of appeal or process to resolve the "issues" and reinstate the accounts.
|
|
||||||
|
|
||||||
As such, these events usually mark the end of the road for the victims' presence and data on Google platforms - including Gmail, Drive, Photos, YouTube - without having any option to extract the data out first. This could be years' worth of documents, family photos, emails, Google Play purchases, and much more (ever used "Sign in with Google" on another service, for example?).
|
|
||||||
|
|
||||||
Some affected people are fortunate to have a large social media following to ensure that their posts describing this treatment can traverse the networks enough to get through to someone close to Google who can try and elevate it in order to get accounts reinstated. However, for most people this is not possible.
|
|
||||||
|
|
||||||
The [creator of Stardew Valley](https://twitter.com/Demilogic) recently [found himself locked out of his 15-year-old Google account](https://twitter.com/Demilogic/status/1358661840402845696) - even whilst involved in a key ongoing deal with Stadia, which he has since pulled out from due to feeling mal-treated. There are many similar stories available, and probably thousands more we never hear about.
|
|
||||||
|
|
||||||
Of course, I am sure there are legitimate reasons for many accounts to be removed and that the original intentions behind these automated systems were good. Either way, this still just worries me. Whilst I haven't (at least, I don't think?) done anything to violate any terms, I just don't want to take the risk and wake up one morning to find I have lost 15 years' worth of emails, photos, and documents.
|
|
||||||
|
|
||||||
## What are the alternatives?
|
|
||||||
|
|
||||||
These days there are so many services that compete with Google's own offerings. For example, [DuckDuckGo](https://duckduckgo.com) is excellent for web search - though several times a day I do need to revert to Google search for more complex queries (which I can do by prepending DuckDuckGo search queries with `!g`). There are [many websites that list good alternatives to Google services](https://restoreprivacy.com/google-alternatives), and I won't bang on about these ideas here - it's up to you what you prefer to use, of course, and this type of thing has been covered many times before.
|
|
||||||
|
|
||||||
Personally, I've used my own domain to send and receive email (using [Fastmail](https://www.fastmail.com)) for several years now, and self-host my files and documents using [Nextcloud](https://nextcloud.com). I don't really use YouTube or have much data tied-up in the other Google offerings.
|
|
||||||
|
|
||||||
However the one service I do rely on still is Google Photos. To be fair, this is a fantastic service - the apps seamlessly back everything up, the search is great (it's Google's bread-and-butter, after all), and I can easily and instantly find specific photos from two decades ago, or from any time in-between. It's also super fast. I'd never found a good-enough replacement for media storage and so I never made the leap.
|
|
||||||
|
|
||||||
## The problems with media storage
|
|
||||||
|
|
||||||
Images and videos - especially with modern cameras and phones - take up a _huge_ amount of space. I take a few pictures every day, and on my messenger apps I sometimes like to save images I receive from friends and family too.
|
|
||||||
|
|
||||||
This has resulted in a collection of over 84,000 pictures and videos in a mostly-continuous stream since 1998 - the year our family got our first digital camera. There are also digitised versions of photos from as early as 1959 on there too. Whilst this is not a massive collection by any standards these days, it forms a significant part of my own data footprint.
|
|
||||||
|
|
||||||
Whilst I was happy with using Google for this one area, I would get so nervous every time I read one of those "deleted accounts" stories that it got to the point where last month I finally committed to making a change.
|
|
||||||
|
|
||||||
In the meantime I needed to try and get my stuff out of Google Photos. The service lets you download 500 images at a time from the web interface, but that would have taken forever. The other option was to use [Google Takeout](https://google.com/takeout). I did this and shortly after received an email containing links to download 48 different archives of data.
|
|
||||||
|
|
||||||
![Email from Google Takeout listing lots of download buttons](/media/blog/google-takeout-email.png)
|
|
||||||
|
|
||||||
When I downloaded a couple of examples, I saw that they seemed to contain a lot of JSON metadata files and not many actual photos. I imagined I'd have to download the whole lot to try and make sense of it all and manually piece bits together. I thought I'd leave that for now whilst I continued my search for an alternative service, and come back to that problem later.
|
|
||||||
|
|
||||||
## The search for a Google Photos alternative
|
|
||||||
|
|
||||||
The first job was to identify a new process/system for media storage. I had a few acceptance criteria in mind;
|
|
||||||
|
|
||||||
- It needed to be affordable (not necessarily as cheap as [Google One](https://one.google.com), but not bank-breaking either).
|
|
||||||
- I needed it to be quickly and easily navigable (i.e. to easily move to a particular date to find photos).
|
|
||||||
- It had to have some type of auto-sync from my phone's photo gallery (I am too lazy to remember to manually "back-up" things - I need automation!).
|
|
||||||
|
|
||||||
I was already using Nextcloud for my documents anyway, and the Nextcloud app (which is brilliant) also includes an auto-upload feature for photos. However, I find Nextcloud gets a bit slow and grinds my server to a halt when viewing (I guess it processes things in the background?) large and recently-uploaded photos. Also, my VPS provider's pricing (not unreasonable) would mean forking out the best part of $400 a year for the required block storage - which would only increase every year as my library gets bigger.
|
|
||||||
|
|
||||||
I also considered [Piwigo](https://piwigo.org), which looks great and is [reported to be very fast](https://piwigo.org/testimonials). However the self-hosted option would have the same pricing implications as Nextcloud (above), and the hosted offering would be [significantly more](https://piwigo.com/pricing) if I was to include videos too. I think Piwigo is aimed more at photographers maintaining and sharing albums rather than for use as a personal photo storage solution.
|
|
||||||
|
|
||||||
I [recently tooted](https://fosstodon.org/web/statuses/105692084325464954) out to the community about this problem and I got some great responses back. One idea in particular caught my eye: [Mega](https://mega.nz). I had used Mega before a while back, and the apps and web interfaces seemed to have come on a long way in recent years. After a bit of research I decided to choose this option. It seemed secure (with client-side encryption), quick, and the apps had the features I needed.
|
|
||||||
|
|
||||||
I went to pay for Mega (using the web browser), and it redirected me to a very dodgy-looking payment website - this threw me a little. I went back to the checkout page to see if I had clicked the wrong thing, clicked "confirm" again, and this time it took me to an entirely _different_ (but still sort of dodgy-looking) payment site. I've set-up [Stripe](https://stripe.com) a few times before, and know it's pretty trivial these days to accept card payments on your own site, and so alarm bells began to ring a little. My paranoid security-focused self was put off enough to continue my search.
|
|
||||||
|
|
||||||
## Migrating to pCloud
|
|
||||||
|
|
||||||
That's when I stumbled upon the Swiss-based [pCloud](https://www.pcloud.com) on a Reddit thread discussing storage alternatives. It seems to be pretty feature-matched with Mega, despite not offering client-side encryption out-of-the-box - but then neither does Google Photos. Additionally, pCloud offers both US and European servers.
|
|
||||||
|
|
||||||
pCloud's apps have similar functions to Mega, and the service also has the added bonus of offering a Google Drive integration! Hopefully this would mean I wouldn't need to spend ages traversing that Google Takeout mess. The service also offers integrations with Dropbox, OneDrive, and some social networking platforms.
|
|
||||||
|
|
||||||
I signed-up and paid - without needing to go to any dodgy sites. I then linked my Google account and waited for the magic to happen.
|
|
||||||
|
|
||||||
It was a little slow. I know there was a fair amount of data, and I imagine the combination of this plus Google rate-limiting and other factors contributed to the speed too. I checked every few hours on the progress; there's a sort of indicator (a folder count), but otherwise there was no way to really check what was going on. After a couple of days I noticed it had stopped (or "aborted") by itself.
|
|
||||||
|
|
||||||
![Screenshot of pCloud, showing Google Drive import aborted](/media/blog/pcloud-google-drive.png)
|
|
||||||
|
|
||||||
I had a quick browse through what pCloud had brought through and could see it had got to around July 2019 before it had had enough. This was OK - it had imported the vast majority and I was happy enough to run through the last couple of years' worth of content on Google Photos, downloading 500 photos at a time to manually upload to pCloud in order to plug the gap.
|
|
||||||
|
|
||||||
I then un-linked my Google account from pCloud. I turned off Google Photos auto-upload from my phone and instead all new media now gets auto-uploaded to pCloud. Job done.
|
|
||||||
|
|
||||||
## Final thoughts
|
|
||||||
|
|
||||||
pCloud's navigation seems to be pretty quick, and uploading content is also very fast. It's not _perfect_, though (is anything?) - viewing photos on the app can take a few seconds to generate/retrieve thumbnails, and it doesn't have the smoothness that Google Photos offers.
|
|
||||||
|
|
||||||
However, it's great for now. I have a _"tangible"_ folder of media that feels more portable in case I ever need to move again. pCloud also has clear channels for communication if I do ever need to get in touch, and I certainly feel as though I am less subject to automated judgments from unruly algorithms.
|
|
@ -1,153 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-02-28T22:05:00Z"
|
|
||||||
title: "Making your Python Flask app serverless"
|
|
||||||
description: "How you can deploy your existing Flask app on a scalable serverless architecture."
|
|
||||||
tags: [100daystooffload, technology, python]
|
|
||||||
image: header-flask-serverless.png
|
|
||||||
imageDescription: Pythons, flasks, and laptops!
|
|
||||||
slug: flask-serverless
|
|
||||||
---
|
|
||||||
|
|
||||||
Python's [Flask framework](https://flask.palletsprojects.com) is an easy and excellent tool for writing web applications. Its in-built features and ecosystem of supporting packages let you create extensible web APIs, handle data and form submissions, render HTML, handle websockets, set-up secure account-management, and much more.
|
|
||||||
|
|
||||||
It's no wonder the framework is used by individuals, small teams and all the way through to large enterprise applications. A very simple, yet still viable, Flask app with a couple of endpoints looks as follows.
|
|
||||||
|
|
||||||
```python
|
|
||||||
from flask import Flask
|
|
||||||
|
|
||||||
app = Flask(__name__)
|
|
||||||
|
|
||||||
@app.route('/')
|
|
||||||
def hello_world():
|
|
||||||
return 'Hello, World!'
|
|
||||||
|
|
||||||
@app.route('/<name>')
|
|
||||||
def greet(name):
|
|
||||||
return 'Hello, ' + name
|
|
||||||
```
|
|
||||||
|
|
||||||
Flask apps like this can easily be deployed to a server (e.g. a VPS) or to an app-delivery service (e.g. [Heroku](https://www.heroku.com), [AWS Elastic Beanstalk](https://aws.amazon.com/elasticbeanstalk), and [Digital Ocean App Platform](https://www.digitalocean.com/products/app-platform)). In these scenarios, the server/provider often charges the developer for each hour the app is running. Additionally, as traffic increases or reduces, the provider can automatically scale up and down the resources powering your app in order to meet demand. However this scaling can sometimes be a slow process and also means that the developer is charged even when the app is not being used.
|
|
||||||
|
|
||||||
If you want to set-up your app so that it can automatically scale from 0 to thousands of concurrent users almost instantly, where you are not charged when users aren't using your app, where it is highly-available (keep up your uptime to meet SLAs), and where there is no server set-up or maintenance required (and there is nothing for bad actors to try and SSH into), then migrating to a more serverless architecture might be of interest to you.
|
|
||||||
|
|
||||||
Also, given that most providers offer a pretty generous free tier for serverless apps, you may not end up paying much at all (up to a few dollars max a month) until you start generating enough traffic.
|
|
||||||
|
|
||||||
_Note: in this article I use Flask as an example, however the same should apply to any WSGI-compatible framework, such as Bottle and Django, too._
|
|
||||||
|
|
||||||
## What is a serverless web app?
|
|
||||||
|
|
||||||
"Serverless" is the generic term for a family of cloud-based execution models where the developer does not need to worry about provisioning, managing, and maintaining the servers that run their application code. Instead, the developer can focus on writing the application and can rely on the cloud _provider_ to provision the needed resources and ensure the application is kept highly-available.
|
|
||||||
|
|
||||||
Although services such as Heroku and [Digital Ocean App Platform](https://www.digitalocean.com/products/app-platform) can be considered "serverless" too (in that there is no server to configure by the developer), I refer more to delivery via _function as a service_ as the particular serverless model of interest in this article, since this offers the benefits listed at the end of the previous section.
|
|
||||||
|
|
||||||
"Function as a service" (FaaS) - as its name suggests - involves writing _functions_, which are deployed to a FaaS provider and can then be _invoked_. Such systems are _event-driven_, in that the functions are called as a result of a particular event occurring - such as on a periodic schedule (e.g. a cron job) or, in the web application case, an HTTP request.
|
|
||||||
|
|
||||||
There are many FaaS providers, such as [Azure Functions](https://docs.microsoft.com/en-us/azure/azure-functions/functions-overview), [Google Cloud Functions](https://cloud.google.com/functions), [Cloudflare Workers](https://workers.cloudflare.com), and [IBM Cloud Functions](https://www.ibm.com/cloud/functions).
|
|
||||||
|
|
||||||
Probably the most famous (and first major) FaaS provider offering is [AWS Lambda](https://aws.amazon.com/lambda). In this article I will focus on using Lambda as the tool for deploying Flask apps, but many of the concepts discussed are generic across providers.
|
|
||||||
|
|
||||||
Serverless apps written using AWS Lambda usually also involve [Amazon API Gateway](https://aws.amazon.com/api-gateway/features), which handles the HTTP request/response side of things and passes the information through as code to the Lambda function. The `event` argument received by the function describes - among other things - the information about the request that can be used to generate an appropriate response, which is then returned by the function.
|
|
||||||
|
|
||||||
```python
|
|
||||||
import json
|
|
||||||
|
|
||||||
def lambda_handler(event, context):
|
|
||||||
name = event['queryStringParameters']['name']
|
|
||||||
return {
|
|
||||||
"statusCode": 200,
|
|
||||||
"headers": {"Content-Type": "application/json"},
|
|
||||||
"body": json.dumps({"Hello": name})
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
As long as your function(s) return a valid object to generate a response from API Gateway, applications on Lambda can use a separate function for each request path and method combination, or use one function for _all_ invocations from API Gateway and use the `event` parameter and code logic to provide the needed actions.
|
|
||||||
|
|
||||||
Either way, this is a different pattern to how Flask structures its functions, requests, and responses. As such, we can't simply deploy our Flask app as-is to Lambda. I'll now talk about how we _can_ do it without too much extra work.
|
|
||||||
|
|
||||||
## Using Serverless framework to describe a basic app
|
|
||||||
|
|
||||||
The [Serverless framework](https://www.serverless.com), along with its extensive [library of plugins](https://www.serverless.com/plugins), is a well-established tool for provisioning serverless applications on a number of providers. It bundles your code and automates the deployment process, making it easy to create a serverless app.
|
|
||||||
|
|
||||||
Configuration of apps deployed using Serverless is done through the `serverless.yml` file. The example configuration below would, when deployed, create an API Gateway interface and a Lambda function using the code in `app.py`, and would invoke the `lambda_handler` function (above) each time a `GET` request is made to `/hello`:
|
|
||||||
|
|
||||||
```yml
|
|
||||||
service: my-hello-app
|
|
||||||
|
|
||||||
provider:
|
|
||||||
name: aws
|
|
||||||
runtime: python3.8
|
|
||||||
region: eu-west-1
|
|
||||||
memorySize: 512
|
|
||||||
|
|
||||||
functions:
|
|
||||||
hello:
|
|
||||||
handler: app.lambda_handler
|
|
||||||
events:
|
|
||||||
- http:
|
|
||||||
path: hello
|
|
||||||
method: get
|
|
||||||
```
|
|
||||||
|
|
||||||
## Deploying an existing Flask app to AWS Lambda
|
|
||||||
|
|
||||||
The good news is that we can also leverage the Serverless framework to deploy Flask apps - and without needing much change to the existing project. This section assumes that you have an AWS account already that you can use. If not, then you can sign-up from [their website](https://aws.amazon.com).
|
|
||||||
|
|
||||||
First off, we need to install the Serverless framework itself. This can be achieved through NPM: `npm install -g serverless`.
|
|
||||||
|
|
||||||
Next, we need to configure credentials that will allow Serverless to interact with your AWS account. To do so, use the IAM manager on the AWS console to generate a set of keys (an access key and secret access key) and then use the following command to configure Serverless to use them:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
serverless config credentials --provider aws --key <ACCESS_KEY> --secret <SECRET_ACCESS_KEY>
|
|
||||||
```
|
|
||||||
|
|
||||||
While you should try and restrict access as much as possible, the fastest (yet riskiest) approach is to use an IAM user with Administrator Access permissions. If you want to configure more security I recommend reading the [Serverless docs](https://www.serverless.com/blog/abcs-of-iam-permissions).
|
|
||||||
|
|
||||||
Once the above groundwork has been completed, you can proceed to create a new `serverless.yml` file in the root of your Flask project:
|
|
||||||
|
|
||||||
```yml
|
|
||||||
service: my-flask-app
|
|
||||||
|
|
||||||
provider:
|
|
||||||
name: aws
|
|
||||||
runtime: python3.8
|
|
||||||
|
|
||||||
plugins:
|
|
||||||
- serverless-wsgi
|
|
||||||
|
|
||||||
functions:
|
|
||||||
api:
|
|
||||||
handler: wsgi_handler.handler
|
|
||||||
events:
|
|
||||||
- http: ANY /
|
|
||||||
- http: ANY {proxy+}
|
|
||||||
|
|
||||||
custom:
|
|
||||||
wsgi:
|
|
||||||
app: app.app
|
|
||||||
```
|
|
||||||
|
|
||||||
Don't worry too much about the `wsgi_handler.handler` and `events` parts - essentially these ensure that all HTTP requests to the service will get routed through to your app via a special handler that Serverless will setup for us.
|
|
||||||
|
|
||||||
This setup assumes your root Flask file is named `app` and that your Flask instance within this file is also named `app` (in the `custom.wsgi` attribute above), so you may need to change this if it doesn't match your project setup.
|
|
||||||
|
|
||||||
Another thing to note is the new `plugins` block. Here we declare that our application requires the [`serverless-wsgi`](https://www.serverless.com/plugins/serverless-wsgi) plugin, which will do much of the heavy lifting.
|
|
||||||
|
|
||||||
To make use of the plugin, you'll need to add it to your project as a dependency by running `serverless plugin install -n serverless-wsgi`. As long as your Flask project dependencies are listed in a `requirements.txt` file, you can now deploy your app by simply running `serverless deploy`. After a few minutes, the framework will complete the deployment and will print out the URL to your new service.
|
|
||||||
|
|
||||||
## Tweaking the deployment
|
|
||||||
|
|
||||||
There are various ways to adjust the environment of your deployed service. For example, you can change the amount of memory assigned to your function, make use of environment variables (e.g. for database connection strings or mail server URLs), define roles for your functions to work with other AWS services, and much more.
|
|
||||||
|
|
||||||
I recommend taking a look at the [Serverless documentation](https://www.serverless.com/framework/docs/providers/aws/guide/serverless.yml) to understand more about what options are available.
|
|
||||||
|
|
||||||
If you want to use a custom domain for your service, then you can either set this up yourself in API Gateway through the AWS console or by using the [`serverless-domain-manager`](https://github.com/amplify-education/serverless-domain-manager) plugin. Either way you will need to have your domain managed using [Route 53](https://aws.amazon.com/route53).
|
|
||||||
|
|
||||||
## Serverless caveats
|
|
||||||
|
|
||||||
Whilst the benefits offered by serverless delivery are strong, there are also some things to bear in mind - particularly when it comes to avoiding unexpected costs. Lambda functions bill per 100 milliseconds of execution time, and so long-running functions may be cut short (unless you tweak the duration allowance on the Lambda function, which can be up to 15 minutes long).
|
|
||||||
|
|
||||||
Additionally, if your Flask app makes use of concurrency (e.g. if you use threads to background longer-running tasks, like email-sending), then this may not play nicely with Lambda, since the function may get terminated once a response is generated and returned.
|
|
||||||
|
|
||||||
I outlined some extra things to watch out for [in a recent article](/blog/2021/01/03/scaling-serverless), so take a look through that if you want to read more on these.
|
|
||||||
|
|
||||||
Generally speaking, however, serverless apps are quite a cheap and risk-free way to experiment and get early prototypes off the ground. So, if you're familiar with Flask (or other WSGI frameworks) and want an easy and scalable way to deploy your app, then perhaps this approach could be useful for your next project.
|
|
@ -1,197 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-04T22:17:00Z"
|
|
||||||
title: Easily set up discoverable RSS feeds on a Gatsby website
|
|
||||||
description: "How to set up multiple discoverable RSS feeds for your static Gatsby website."
|
|
||||||
tags: [100daystooffload, technology, javascript]
|
|
||||||
slug: gatsby-rss
|
|
||||||
---
|
|
||||||
|
|
||||||
RSS has had a [bit of a resurgence](/blog/2021/02/03/rss-rise-fall-rise) for personal websites and blogs in recent years, especially with the growing adoption of [Small Web](https://ar.al/2020/08/07/what-is-the-small-web) and [IndieWeb](https://indieweb.org) ideologies.
|
|
||||||
|
|
||||||
Many static site generators - including [Hugo](https://gohugo.io), [Jekyll](https://jekyllrb.com), and [Eleventy](https://www.11ty.dev) - can easily support the automatic generation of RSS feeds at build time (either directly, or through plugins).
|
|
||||||
|
|
||||||
The same is true for [Gatsby](https://www.gatsbyjs.com) - the framework currently used to build this static website - and the good news is that setting up one feed, or multiple ones for different categories, only takes a few minutes.
|
|
||||||
|
|
||||||
## Your Gatsby blog structure
|
|
||||||
|
|
||||||
This article talks about RSS feeds for blogs (a typical use-case), but is also relevant for other notes, podcasts, or anything else that is published periodically to your Gatsby site.
|
|
||||||
|
|
||||||
In Gatsby, the typical blog set-up involves the blog entries in markdown format, and a [template "page"](https://www.gatsbyjs.com/docs/tutorial/part-seven), which is used to render the markdown blog posts.
|
|
||||||
|
|
||||||
You'll also probably have a "blog" page which lists or paginates your posts for visitors to find them, and a `createPages` function in your `gatsby-node.js` that generates the pages from the template and markdown.
|
|
||||||
|
|
||||||
All this sounds way more complicated than it is in practice, and there are lots of [guides available](https://blog.logrocket.com/creating-a-gatsby-blog-from-scratch) to help set this up.
|
|
||||||
|
|
||||||
At the very least, this article assumes you have blog posts written in a directory containing markdown for each post similar to the following:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
---
|
|
||||||
date: "2021-03-04T22:17:00Z"
|
|
||||||
title: "Easily set up discoverable RSS feeds on a Gatsby website"
|
|
||||||
description: "How to set up multiple discoverable RSS feeds for your static Gatsby website."
|
|
||||||
tags: [100daystooffload, technology, javascript]
|
|
||||||
---
|
|
||||||
|
|
||||||
The post content starts here...
|
|
||||||
```
|
|
||||||
|
|
||||||
The metadata (frontmatter) doesn't need to be exactly as shown, but having useful metadata (e.g. tags) in-place helps make your feeds richer.
|
|
||||||
|
|
||||||
## Creating your feeds
|
|
||||||
|
|
||||||
To create the feeds, we'll use a Gatsby plugin called [`gatsby-plugin-feed`](https://www.gatsbyjs.com/plugins/gatsby-plugin-feed), which will do most of the heavy-lifting for us (as long as you have a blog in place structured similarly to the way described above).
|
|
||||||
|
|
||||||
First off, add the plugin as a dependency: `yarn add gatsby-plugin-feed`. I also recommend installing `moment` to help with formatting dates for the feed (as we'll see later): `yarn add moment`.
|
|
||||||
|
|
||||||
Next, you'll need to create some code in `gatsby-config.js`. If you have a blog already then you likely already have content in this file (e.g. `gatsby-source-filesystem` configuration). Your file probably looks a little like the following:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
module.exports = {
|
|
||||||
siteMetadata: {
|
|
||||||
title: 'My Cool Website',
|
|
||||||
siteUrl: 'https://my.cool.website',
|
|
||||||
},
|
|
||||||
plugins: [
|
|
||||||
{
|
|
||||||
resolve: 'gatsby-source-filesystem',
|
|
||||||
options: { ... },
|
|
||||||
},
|
|
||||||
'gatsby-plugin-react-helmet',
|
|
||||||
],
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
Along with any other plugins you may have.
|
|
||||||
|
|
||||||
To create the feed we'll make use of a GraphQL query, and a function which will create a feed object. If we define these separately (as below), it will give us more flexibility later.
|
|
||||||
|
|
||||||
In the same file (`gatsby-config.js`), at the top, first `require` the `moment` library we installed earlier, define the query we'll use, and a function to create a feed object:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
const moment = require('moment');
|
|
||||||
|
|
||||||
// Query for all blog posts ordered by filename (i.e. date) descending
|
|
||||||
const rssPostQuery = `
|
|
||||||
{
|
|
||||||
allMarkdownRemark(
|
|
||||||
sort: { order: DESC, fields: [fileAbsolutePath] },
|
|
||||||
filter: { fields: { slug: { regex: "/blog/" } } }
|
|
||||||
) {
|
|
||||||
edges {
|
|
||||||
node {
|
|
||||||
html
|
|
||||||
fields { slug }
|
|
||||||
frontmatter {
|
|
||||||
title
|
|
||||||
description
|
|
||||||
date
|
|
||||||
tags
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`;
|
|
||||||
|
|
||||||
const createRssPost = (edge, site) => {
|
|
||||||
const { node } = edge;
|
|
||||||
const { slug } = node.fields;
|
|
||||||
return Object.assign({}, edge.node.frontmatter, {
|
|
||||||
    description: edge.node.frontmatter.description,
|
|
||||||
date: moment.utc(`${node.frontmatter.date}`, 'YYYY/MM/DDTHH:mmZ').format(),
|
|
||||||
url: site.siteMetadata.siteUrl + slug,
|
|
||||||
guid: site.siteMetadata.siteUrl + slug,
|
|
||||||
custom_elements: [{ "content:encoded": edge.node.html }],
|
|
||||||
  });
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
The `rssPostQuery` assumes your blog posts are rendered at `/blog/filename` in your built site. If not, then just change this value in the regex. Likewise, the `createRssPost` function assumes the dates in the frontmatter of your posts are formatted like `YYYY/MM/DDTHH:mmZ` - if not, just change this string to match your own format (I use UTC here as we're dealing with global audiences!).
|
|
||||||
|
|
||||||
Essentially, the GraphQL query string returns all markdown files ordered by descending filename (I title my blog posts by date, so this gives a reverse chronological ordering of posts, with the newest first), and gives us the post content, slug ("path"), and selected fields from the posts' frontmatters.
|
|
||||||
|
|
||||||
We use a regex in the query to discern between different types of markdown files. For example, you may have a collection of notes - also written in markdown - which we want to ignore for the purposes of creating an RSS feed for _just_ blog posts.
|
|
||||||
|
|
||||||
The `createRssPost` function (which we'll call later), accepts a markdown file (`edge`) and information about the website (`site`), and returns a fresh object representing this information to be eventually embedded in the feed.
|
|
||||||
|
|
||||||
The `guid` field is a globally-unique ID for this post on your blog and reader software will use this to, for example, determine if the user has already seen the post and should mark it as "read". Since all of my posts have a unique path ("slug"), I just use this for the ID.
|
|
||||||
|
|
||||||
Finally, we need to add a section to our `plugins` array to tell `gatsby-plugin-feed` how to build our feed using the query and function we created above. In the same file, make the following changes:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
module.exports = {
|
|
||||||
siteMetadata: { ... }, // omitted for brevity
|
|
||||||
plugins: [
|
|
||||||
{
|
|
||||||
resolve: 'gatsby-source-filesystem',
|
|
||||||
options: { ... }, // omitted for brevity
|
|
||||||
},
|
|
||||||
{ // Add this object to your "plugins" array:
|
|
||||||
resolve: 'gatsby-plugin-feed',
|
|
||||||
options: {
|
|
||||||
feeds: [
|
|
||||||
{
|
|
||||||
serialize: ({ query: { site, allMarkdownRemark } }) =>
|
|
||||||
allMarkdownRemark.edges.map(e => createRssPost(e, site)),
|
|
||||||
query: rssPostQuery,
|
|
||||||
          output: '/rss.xml',
|
|
||||||
title: 'My Cool Blog',
|
|
||||||
description: 'All of my blog posts'
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
...
|
|
||||||
],
|
|
||||||
};
|
|
||||||
```
|
|
||||||
|
|
||||||
The `gatsby-plugin-feed` plugin only runs when the site is actually _built_. If you have your Gatsby site running locally, just run `gatsby build` in a separate Terminal window and then navigate to `/rss.xml` on your local development website to view the feed.
|
|
||||||
|
|
||||||
## Creating multiple feeds
|
|
||||||
|
|
||||||
The example configuration in the previous section creates a single feed containing all blog posts.
|
|
||||||
|
|
||||||
However, you may have noticed that the `feeds` attribute is an array; this means that the plugin can be used to create multiple feeds. I do exactly that on [this website](/feeds): I have different feeds for different audiences (e.g. for technology, life, books, etc.).
|
|
||||||
|
|
||||||
Since we've already broken our code out into a separate query and function, it is easy to add new feeds by `filter`ing on the markdown edges before passing them to `map` in the `serialize` function.
|
|
||||||
|
|
||||||
If you modify the same file again (`gatsby-config.js`), you can create a feed for all of your posts that contain a tag named "technology" as follows:
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
... // omitted for brevity
|
|
||||||
{
|
|
||||||
resolve: 'gatsby-plugin-feed',
|
|
||||||
options: {
|
|
||||||
feeds: [
|
|
||||||
{ ... }, // omitted for brevity
|
|
||||||
{
|
|
||||||
serialize: ({ query: { site, allMarkdownRemark } }) =>
|
|
||||||
allMarkdownRemark.edges.filter(e => {
|
|
||||||
const tags = e.node.frontmatter.tags;
|
|
||||||
return tags && tags.length > 0 && tags.indexOf('technology') > -1;
|
|
||||||
}).map(e => createRssPost(e, site)),
|
|
||||||
query: rssPostQuery,
|
|
||||||
output: '/technology.xml',
|
|
||||||
title: 'My Technology Blog',
|
|
||||||
description: 'Posts in my blog tagged with "technology".'
|
|
||||||
},
|
|
||||||
],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create a new feed at `/technology.xml` containing these tech posts.
|
|
||||||
|
|
||||||
Since it's just plain old JavaScript, you can use any of the available information to craft a number of flexible feeds for your visitors to subscribe to. You can then list these feeds on a page on your site, like [this one](/feeds).
|
|
||||||
|
|
||||||
## Feed discovery
|
|
||||||
|
|
||||||
The `gatsby-plugin-feed` plugin has one more trick up its sleeve: without any extra work it will automatically inject the relevant `<link />` tags to your site's HTML at build-time to list the feeds that you have configured.
|
|
||||||
|
|
||||||
This means that your visitors just need to add your site's root URL (e.g. "https://my.cool.website") into their feed reader and it will suggest the available feeds to them.
|
|
||||||
|
|
||||||
![A screenshot showing the Reeder app auto-listing my website's feeds](/media/blog/reeder-feeds.png)
|
|
||||||
|
|
||||||
The image above shows the [Reeder macOS app](https://www.reederapp.com) automatically listing the available feeds on my website after entering just the root URL for the site. Visitors can then just add the ones they want.
|
|
@ -1,70 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-08T19:23:00Z"
|
|
||||||
title: "Thoughts on minimalism, and what happens when I get mail"
|
|
||||||
description: "My processes for handling physical mail, receipts, and other paperwork."
|
|
||||||
tags: [100daystooffload, life, technology]
|
|
||||||
slug: getting-mail
|
|
||||||
---
|
|
||||||
|
|
||||||
## Minimising possessions
|
|
||||||
|
|
||||||
Like many people I these days try and live a minimal life when it comes to possessions. Having more _stuff_ means there is a greater level of responsibility required to look after it. I love the principles involved in "owning less".
|
|
||||||
|
|
||||||
Although I am in a very different situation to [Pieter Levels](https://levels.io), I find the ideas behind his [100 Thing Challenge](https://levels.io/the-100-thing-challenge) (and [other related pieces](https://levels.io/tag/minimalism)) to be inspiring.
|
|
||||||
|
|
||||||
Although my home contains items that are technically mine - furniture, kitchenware, decorations, etc. - I consider these as belonging to the _house_ itself rather than as my personal belongings. Personal items are essentially the things I can fit into my backpack and are things I actually _need_ on a daily or weekly basis: my laptop, my phone, some of my clothes, my toothbrush, passport, and a few other smaller items.
|
|
||||||
|
|
||||||
Non-essential things - although a "luxury" - are also a _liability_ (and an anchor).
|
|
||||||
|
|
||||||
This also helps to keep emotional attachment out of ownership. I know that if I were to lose or break my phone, I could get another and continue on as before. The main concepts here for me are _portability_ and _replaceability_.
|
|
||||||
|
|
||||||
I consider my data and communications to be personal belongings, too. For example, emails I've sent and received, documents, images, and so on. Since these are all digital, I can just stick them on my Nextcloud (or [pCloud](/blog/2021/02/24/google-photos-pcloud)), and I can access them any time through my phone or laptop.
|
|
||||||
|
|
||||||
I also strive for digital minimalism too, where possible. However, since this data storage methodology is scalable and I can keep things very organised I don't mind holding onto data and documents should they be useful in the future. Even with thousands of stored documents, the collection is still _portable_ and it fits with my model.
|
|
||||||
|
|
||||||
## Mail and paperwork
|
|
||||||
|
|
||||||
Many of the world's organisations - including insurance companies, banks, lawyers, and public services - still love doing business with physical documents and through physical mail. Also, these are typically the types of documents you are supposed to keep hold of for long periods of time for the purposes of financial records, insurance certification, and so on. Over time this paperwork builds up and quickly becomes disorganised.
|
|
||||||
|
|
||||||
Some people keep boxes or filing cabinets of documents and mail. This turns into something else to be responsible for. It's not portable (in the "backpack" idea mentioned earlier) or replaceable. If there was a fire it would be lost, and if moving home it's something else to "worry" about.
|
|
||||||
|
|
||||||
Until a couple of years ago, I kept documents in ring-binders. My process would include holepunching documents (retro, I know), finding the section of the ringbinder most appropriate for that document, placing the document, and then putting the ringbinders back on the shelf.
|
|
||||||
|
|
||||||
I had years' worth of utility bills, insurance documents, bank statements, pay-slips, and more that I would need to bring with me whenever I moved and always ensure there was a physical space for them in my life somewhere.
|
|
||||||
|
|
||||||
I began to realise that - for the vast majority of these documents - I would never really need the _original_ version. Apart from things like my passport and paper certificates containing security features, document _copies_ would be fine. And since I already had a system for storing digital documents, I could extend this to maintain a more organised (and searchable) collection of digitised paper documents too.
|
|
||||||
|
|
||||||
## Digitising paperwork
|
|
||||||
|
|
||||||
Phone cameras these days are more than capable of creating high-quality digital replicas of paper documents. There are also many scanner apps and software available to make this easier.
|
|
||||||
|
|
||||||
I personally use [Scanner Pro](https://apps.apple.com/app/apple-store/id333710667) on my iPhone, which is very useful. It automatically detects paper edges (even documents with weird dimensions) and straightens the image sensibly too. It also has settings to help configure further; for example, I only need greyscale copies and not the highest resolution - both of these factors help decrease the size of the eventual file.
|
|
||||||
|
|
||||||
The official iOS [Files app](https://apps.apple.com/us/app/files/id1232058109) also has a "Scan Documents" feature, which looks pretty good. I've not used this extensively myself yet.
|
|
||||||
|
|
||||||
After downloading the scanner app, I went through my ring-binders and piled up all the documents to throw out - stuff I just didn't need any record of but had, for some reason, kept anyway. I then went through each remaining section in turn and scanned each document in - storing each PDF to my Nextcloud.
|
|
||||||
|
|
||||||
The process was surprisingly quick and by the end I had a nicely organised collection of files on Nextcloud and a large pile of paper documents I could throw out. As I mentioned earlier, about the only physical things I _did_ keep were certificates, my passport, and a handful of other items.
|
|
||||||
|
|
||||||
It was a weirdly therapeutic exercise!
|
|
||||||
|
|
||||||
## My process now
|
|
||||||
|
|
||||||
Jumping back to the present and my more minimalism-focused self, I am now very strict about what paperwork I keep. In fact, I don't think I've kept hold of a physical document that I've received in the last year (and probably longer).
|
|
||||||
|
|
||||||
I have a simple process:
|
|
||||||
|
|
||||||
1. I receive the document/paperwork and open it;
|
|
||||||
1. I use my phone to scan the document;
|
|
||||||
1. I sync the file to a `0 Unfiled` directory on my Nextcloud, titled by date, sender, and short subject (e.g. `2021-03-02_BritishGas_Statement.pdf`);
|
|
||||||
1. I throw the document out (shredding first if sensitive);
|
|
||||||
1. If the paperwork requires action, I either do so immediately or set a reminder to do so;
|
|
||||||
1. Once a month or so I go through my `0 Unfiled` directory and categorise properly according to my personal filesystem.
|
|
||||||
|
|
||||||
I use a "holding" directory (`0 Unfiled`) to make the process quicker (for example, if there are several documents to scan) and it ensures I have actually actioned the files once I come round to organising them later. I use a `0` at the start of the directory name so that it sits at the top of my filesystem root in order to improve efficiency (and I try and use the [Johnny.Decimal](https://johnnydecimal.com) concepts as much as possible).
|
|
||||||
|
|
||||||
I also use the holding directory for other important documents - such as email attachments I want to include in this system. To me, it doesn't matter which medium was used to receive the document: it's all just data to be categorised and stored.
|
|
||||||
|
|
||||||
It's a satisfying process. I now feel more organised, I can easily find a particular document - even from several years ago - without needing to trawl through piles of paper; I can ensure _longevity_ and _integrity_ of the data (i.e. it can't get torn or damaged); I can back the collection up with added _redundancy_; and I can easily view and share the documents from anywhere.
|
|
||||||
|
|
||||||
If you currently keep lots of paper records and are interested in minimising your physical footprint then I can recommend trying a similar process yourself.
|
|
@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-10T20:18:00Z"
|
|
||||||
title: "The Hunt for Red October by Tom Clancy"
|
|
||||||
description: "My thoughts on The Hunt for Red October by Tom Clancy"
|
|
||||||
tags: [100daystooffload, book, opinion]
|
|
||||||
slug: red-october
|
|
||||||
---
|
|
||||||
|
|
||||||
I recently finished reading [The Hunt for Red October](https://www.goodreads.com/book/show/19691.The_Hunt_for_Red_October) by [Tom Clancy](https://www.goodreads.com/author/show/3892.Tom_Clancy).
|
|
||||||
|
|
||||||
![Book cover for The Hunt for Red October](/media/blog/red-october.jpg)
|
|
||||||
|
|
||||||
This genre of novel (sort of military thriller fiction) is not usual for me and this is the first Clancy book I have read. That being said, the book has been on my "to-read" list for a fair amount of time and so I am glad I got round to reading it.
|
|
||||||
|
|
||||||
I also hadn't seen [the movie](https://www.imdb.com/title/tt0099810) (starring Sean Connery and Alec Baldwin) by the time I read it and so I didn't have any pre-perceived ideas about the story and could read afresh.
|
|
||||||
|
|
||||||
Side note: I have now since watched the movie, and whilst the core plot is mostly the same there are a lot of differing details throughout (in terms of both angle and storyline), and so I can certainly recommend both media if you've previously seen one or the other or neither.
|
|
||||||
|
|
||||||
In general, I very much enjoyed the book. It was an exciting read from start to finish, with interesting characters, relationships and story arcs. I was fascinated by all of the technical detail and also felt that it helped explain and justify many of the core concepts and features of the story. The character development was good, and you quickly build a connection with many of the different people involved.
|
|
||||||
|
|
||||||
Though I do not think this a fault of the author (I imagine the work is an accurate reflection given the time of the setting), I would hope that if it were written in modern times there would be improved gender diversity and more female representation in the novel - as it is I do not remember there being a single female character (aside from mentioning wives and family members who do not appear in the story directly).
|
|
||||||
|
|
||||||
Either way, I can certainly recommend the book to others who also enjoy an exciting story and lots of technical detail. I thought the run-up to the ending was great and I am definitely intrigued to further my reading in this genre.
|
|
@ -1,45 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-15T11:05:00Z"
|
|
||||||
title: "The Tildeverse"
|
|
||||||
description: "Why the Tildeverse is interesting and why you might want to join in."
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
slug: tildeverse
|
|
||||||
---
|
|
||||||
|
|
||||||
## The last twenty years of internet evolution
|
|
||||||
|
|
||||||
Although I was still somewhere between being of single-digit age and a young teen back in the '90s and early '00s, I still fondly remember discovering and becoming a small part of the flourishing community of personal, themed, and hobby websites that connected the web.
|
|
||||||
|
|
||||||
We were even given basic server space in school and the wider internet was thriving with [GeoCities](https://en.wikipedia.org/wiki/Yahoo!_GeoCities) and communities grew around services like [Neopets](http://www.neopets.com). Everyday, after school, we'd go home and continue our playground conversations over [MSN Messenger](https://en.wikipedia.org/wiki/Windows_Live_Messenger) (after waiting for the dial-up modem to complete its connection, of course). The internet felt small and personal (even if you didn't use your real name or identity) and _exciting_.
|
|
||||||
|
|
||||||
For those more tech-aware than I during those days there were also the established [BBS systems](https://en.wikipedia.org/wiki/Bulletin_board_system), [IRC](https://en.wikipedia.org/wiki/Internet_Relay_Chat) (which is still very much in active use), and several other types of available internet and communication services.
|
|
||||||
|
|
||||||
Over the years since then we've obviously seen the introduction and growth of tech companies, which have exploded into nearly every corner of the internet. Some have come and gone, but many are here still and continue to grow. We're now at a point where many of these services are almost a full "internet" (as it was back in the day) by themselves: on Facebook you can host a page for yourself or your business, you can engage with any number of other apps through your Facebook account, you can chat in real-time with individuals or groups of friends and family, and much more.
|
|
||||||
|
|
||||||
In the developing world, many people see the [internet and Facebook](https://medium.com/swlh/in-the-developing-world-facebook-is-the-internet-14075bfd8c5e) as being entirely analogous such that new mobile handsets are sold with the app pre-installed on their device and cellular carriers [sometimes provide free access to the platform](https://www.fool.com/investing/2020/05/22/facebook-expanded-internet-access-africa-1-billion.aspx) as part of their data plan.
|
|
||||||
|
|
||||||
This boom (invasion?) has completely changed the way the internet works for day-to-day users. Although these companies and their huge marketing teams have facilitated the growth of adoption of technology for community and communication, it has come at a cost. When using these services, the internet no longer feels personal and exciting.
|
|
||||||
|
|
||||||
For many people - particularly those who grew up with this state of the world or those who never fully engaged before Web 2.0 - this is fine and not a problem. They would likely laugh at the simplicity and "slowness" of the "old internet" compared to the flashy, speedy and engaging platforms they are used to interacting with for several hours every day.
|
|
||||||
|
|
||||||
## Community through the _Tildeverse_
|
|
||||||
|
|
||||||
However, there are also many of us who miss the _quality_ and _meaningfulness_ of the smaller and slower web. Since joining Mastodon a couple of years back, it's been great to be part of a movement that actively encourages the growth and maintenance of personal websites, blogs, distributed systems, and the self-hosted services that help promote these ideologies.
|
|
||||||
|
|
||||||
Movements and concepts such as the [Small Web](https://ar.al/2020/08/07/what-is-the-small-web), the [Indie Web](https://indieweb.org), and even initiatives like [Project Gemini](https://gemini.circumlunar.space) have all helped to raise awareness around the fact that there is still a large number of people interested in promoting the ideas around the [slow web](https://jackcheng.com/essays/the-slow-web), and building a real sense of _community_.
|
|
||||||
|
|
||||||
Also part of this movement is the notion of the _Tildeverse_. The [Tildeverse](https://tildeverse.org) draws some inspiration from [PubNix](https://www.pubnix.net) and stems from building community through "belonging" - similar to how one might feel when interacting with the [Fediverse](https://en.wikipedia.org/wiki/Fediverse).
|
|
||||||
|
|
||||||
The Tildeverse is an opportunity for people to _donate_ server resources by provisioning and managing a \*nix system (e.g. Linux, BSD, or similar), on which members of that _tilde community_ can have a user account that they can access using programs such as [SSH](https://en.wikipedia.org/wiki/SSH_(Secure_Shell)).
|
|
||||||
|
|
||||||
The name is derived from the fact that the tilde symbol (`~`) is used to denote a user's _home directory_ on UNIX-like systems that offer multiuser functionality (e.g. `~will`). On such servers, users can use their account and home directory to publish a website, a Gemini capsule, use tools to chat with other members via IRC or internal mail, or take advantage of any number of other services the server administrators may offer.
|
|
||||||
|
|
||||||
To join, it is recommended to first identify a community you feel you can contribute positively towards. Many servers don't require payment to join (although there are often options to make donations to help contribute towards the running costs), but it is usually expected that you help foster the sense of community by actively engaging with others, posting interesting or useful content, or by abiding by other "rules" that may be in place.
|
|
||||||
|
|
||||||
If you have found a community you'd like to join, a typical registration is often achieved by emailing the server administrators with your desired username and an SSH public key. If and when your registration is accepted, you can then use the corresponding private key to login and begin to engage with the community.
|
|
||||||
|
|
||||||
Many such communities, such as [tilde.club](https://tilde.club), list some of the users' home directories as webpages. This lets you get an idea of the community before choosing to join. Many homepages (though this isn't limited to the Tildeverse) include a _webring_, which you can use to navigate to other user websites belonging to the same webring.
|
|
||||||
|
|
||||||
Others, such as [tanelorn.city](https://tanelorn.city), are more focused on publishing Gemini content if this is more interesting to you.
|
|
||||||
|
|
||||||
Either way, I'd recommend browsing from [tildeverse.org](https://tildeverse.org) as a starting point if you're interested in getting involved. It helps explain some of the concepts and lists some of the Tildeverse _member_ servers.
|
|
@ -1,27 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-17T19:23:00Z"
|
|
||||||
title: "Blood, Sweat, and Pixels by Jason Schreier"
|
|
||||||
description: "My thoughts on the book 'Blood, Sweat, and Pixels' by Jason Schreier"
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: blood-sweat-pixels
|
|
||||||
---
|
|
||||||
|
|
||||||
This post contains some of my thoughts on the book _[Blood, Sweat, and Pixels](https://www.goodreads.com/book/show/34376766-blood-sweat-and-pixels)_ by [Jason Schreier](https://www.goodreads.com/author/show/16222011.Jason_Schreier).
|
|
||||||
|
|
||||||
![Blood, Sweat, and Pixels book cover](/media/blog/blood-sweat-pixels.jpg)
|
|
||||||
|
|
||||||
This book contains a number of stories about how some of the most well-known (and other less well-known) video games are made. The book's subtitle, "_The Triumphant, Turbulent Stories Behind How Video Games Are Made_", sums it up pretty well.
|
|
||||||
|
|
||||||
Working in the software industry myself, I often hear about the notion of "crunch time", which is a term we've borrowed from the game development industry at times when critical updates, fixes, or deadlines are pressing. However, after reflecting on the stories in this book, it makes me realise that the "crunches" we suffer are nothing to the crunch and stresses experienced by game developers in many small teams and large development studios alike.
|
|
||||||
|
|
||||||
Every chapter explains in detail the pain and reward faced by game developers and management teams on an ongoing basis. The developer skill and expertise required by game studios, and the time and size of the required resource, helps to explain the huge financial impact these projects have.
|
|
||||||
|
|
||||||
It's no wonder why such harsh deadlines are set. In many cases it's a matter of "life or death": either the game gets released on time or there is no game at all and everyone has to lose their job - even in large well-funded companies.
|
|
||||||
|
|
||||||
I loved the stories of the groups of developers that ended up leaving their well-paid (but stressful) jobs in order to start something by themselves as a smaller group - not quite realising at the start what they were letting themselves in for.
|
|
||||||
|
|
||||||
I enjoyed the story behind the development of the game _Stardew Valley_. This is a game I love and have played for hours on my Switch - not knowing really (or fully appreciating) where the game came from and all the time spent by its solo developer and the stress that went on behind the scenes.
|
|
||||||
|
|
||||||
The background to the development of _The Witcher 3_ was also fascinating; how the relatively small but super-ambitious studio [CD Projekt Red](https://en.cdprojektred.com) successfully brought to the world stage the Polish much-loved fantasy world.
|
|
||||||
|
|
||||||
The book was great, and well-narrated by [Ray Chase](https://en.wikipedia.org/wiki/Ray_Chase_(voice_actor)) (I listened to the [Audible version](https://www.audible.co.uk/pd/Blood-Sweat-and-Pixels-Audiobook/B075KG1SBW)). I only wish there were more stories (it only took a few days to get through), but I appreciate the effort the author went into with researching and interviewing some of the key people involved. It is an excellent insight into how parts of the game industry work.
|
|
@ -1,136 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-22T11:50:00Z"
|
|
||||||
title: "Running your own Matrix homeserver"
|
|
||||||
description: "A rough guide on how to run your own Matrix homeserver."
|
|
||||||
tags: [100daystooffload, technology, selfhost]
|
|
||||||
image: header-host-matrix.png
|
|
||||||
imageDescription: AI artwork of a home server.
|
|
||||||
slug: host-matrix
|
|
||||||
---
|
|
||||||
|
|
||||||
# Why use decentralised communication services
|
|
||||||
|
|
||||||
Centralised communication services, such as Telegram, Signal, and Whatsapp, offer convenient means to chat to friends and family using your personal devices. However these services also come with a number of pitfalls that are worth considering. For example:
|
|
||||||
|
|
||||||
- Many of these services are linked to your phone number, which can affect your privacy.
|
|
||||||
- They can be invasive with your contacts (_"Jane Doe is now using Telegram!"_).
|
|
||||||
- They usually require you to use proprietary client software. If your OS/platform isn't supported then you can't use that service.
|
|
||||||
- They typically require that everyone using the service has to use the same client software.
|
|
||||||
- They can be unreliable (Whatsapp frequently has downtime).
|
|
||||||
- They are invasive and collect data about you (particularly Whatsapp). If you don't pay for the service, then _you_ are the product.
|
|
||||||
- Even though Signal is encrypted end-to-end, its servers are based in the US and are subject to the laws there. Also their open-source server-side software appears to [not have been updated](https://github.com/signalapp/Signal-Server) for some time.
|
|
||||||
|
|
||||||
There are, of course, other factors on both sides that you may want to consider. It can be hard to move away from these services - after all, there's no point using a system that no-one else you need to talk to uses.
|
|
||||||
|
|
||||||
However, for some people, being able to avoid these issues can be important. One way to do so is to participate in a (preferably open-source) decentralised communication service in which the entire network is not owned by a single entity and where data collection is not the business model. This also helps prevent instability and downtime, since there is not a single point of failure.
|
|
||||||
|
|
||||||
This is analogous to using services such as Mastodon and Pixelfed over Twitter and Instagram, respectively - the underlying software is open-source and anyone can host an "instance". In these cases, each instance can communicate with others using the [ActivityPub](https://en.wikipedia.org/wiki/ActivityPub) protocol. In this post I will talk about another protocol that offers decentralised and federated encrypted communication.
|
|
||||||
|
|
||||||
# The Matrix protocol
|
|
||||||
|
|
||||||
The [Matrix protocol](https://www.matrix.org) is one example of a standard for real-time decentralised communication. Since the standard is open, anyone can build server and client software that enables end-to-end encrypted communication between two or more people. Another example of a similar protocol is [XMPP](https://en.wikipedia.org/wiki/XMPP), which is also very popular and has been around (in its earlier forms) since 1999.
|
|
||||||
|
|
||||||
When using Matrix, you belong to a "homeserver". This is where your messages and some account details are stored. However, since Matrix is a _federated_ protocol, you can use your account to communicate with others on your homeserver as well as people from other homeservers that federate with yours.
|
|
||||||
|
|
||||||
The standard was introduced back in 2014, and by now there is an established ecosystem of software available for use. In fact, you can use [Element](https://element.io/get-started) on your device and get started by joining an existing homeserver right now.
|
|
||||||
|
|
||||||
Additionally, if you don't want the hassle of self-hosting yet another service, then [Element also provides plans](https://element.io/matrix-services) that allow you to run your own homeserver on managed hosting.
|
|
||||||
|
|
||||||
# Self-hosting a Matrix homeserver
|
|
||||||
|
|
||||||
If you want more control over your data, you may opt to self-host your own homeserver that implements the Matrix standard. Even if you self-host you can still take advantage of the protocol's federation features and communicate with people on other homeservers.
|
|
||||||
|
|
||||||
The resource requirement for Matrix servers is a bit on the heavier side (especially when compared to the lighter XMPP servers). However if you already run a small-ish VPS anyway (as I do for things like Nextcloud), and if you only expect one or two people to be enrolled directly on your homeserver, then you can certainly host Matrix on that same VPS without too much trouble. For reference, I have a single $10 server from [Linode](https://www.linode.com), which happily runs Matrix alongside a number of other services.
|
|
||||||
|
|
||||||
The [Synapse project](https://github.com/matrix-org/synapse) is probably one of the most robust and feature-complete homeserver implementations, and is the one I'll talk about in this post. They also offer an officially supported [Docker image](https://hub.docker.com/r/matrixdotorg/synapse), which is what I would recommend using to keep things in one place.
|
|
||||||
|
|
||||||
## Homeserver name
|
|
||||||
|
|
||||||
Firstly, I'd recommend setting up a domain (either an existing one or a new one) and then updating your DNS such that the relevant entry points to your server.
|
|
||||||
|
|
||||||
It is important to think about the domain name you choose for your homeserver, since this cannot be changed later. [Matrix recommends](https://github.com/matrix-org/synapse/blob/master/INSTALL.md#choosing-your-server-name) using your root domain name itself rather than a subdomain for your homeserver name. However if you already host a website using your full domain name you will need some extra configuration to make it work properly. I personally don't, as I wanted an easier setup!
|
|
||||||
|
|
||||||
## Exposing ports and preparing TLS certificates
|
|
||||||
|
|
||||||
In order to configure HTTPS, I'd recommend setting up an Nginx container or server as a reverse proxy and issuing certificates using Let's Encrypt. The Matrix protocol uses standard port 443 for communication with clients (e.g. from an app) - known as the "client port" - and port 8448 for communication with other homeservers (the "federation port").
|
|
||||||
|
|
||||||
You may wish to read some of the [official documentation](https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md) on setting up a reverse-proxy, but I'll run through roughly what I do below.
|
|
||||||
|
|
||||||
Depending on your Nginx setup, you may need a couple of `server` blocks similar to the following to configure your reverse proxy (assuming your homeserver name is "example.com"):
|
|
||||||
|
|
||||||
```
|
|
||||||
server {
|
|
||||||
listen 80;
|
|
||||||
listen [::]:80;
|
|
||||||
server_name example.com;
|
|
||||||
return 301 https://$host$request_uri;
|
|
||||||
}
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 443 ssl;
|
|
||||||
listen [::]:443 ssl;
|
|
||||||
listen 8448 ssl;
|
|
||||||
listen [::]:8448 ssl;
|
|
||||||
|
|
||||||
server_name example.com;
|
|
||||||
|
|
||||||
ssl_certificate /path/to/fullchain.pem;
|
|
||||||
ssl_certificate_key /path/to/privkey.pem;
|
|
||||||
|
|
||||||
location / {
|
|
||||||
proxy_pass http://synapse:8008;
|
|
||||||
proxy_set_header X-Forwarded-For $remote_addr;
|
|
||||||
proxy_set_header X-Forwarded-Proto $scheme;
|
|
||||||
proxy_set_header Host $host;
|
|
||||||
client_max_body_size 50M;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If you run Nginx as a Docker container remember also to expose port 8448 alongside 443.
|
|
||||||
|
|
||||||
Synapse uses port 8008 for HTTP communication, to which we forward requests received on both the secure client and federation ports. In the example above, `synapse` is the name of the container that runs my homeserver, as we'll cover next. Again, depending on your setup, and whether you choose to use Docker, you may need to change this value so that your reverse proxy can route through to port 8008 on your homeserver.
|
|
||||||
|
|
||||||
## Generate a configuration file
|
|
||||||
|
|
||||||
The next step is to generate your homeserver config file. I recommend firstly creating a directory to hold your synapse data (e.g. `mkdir synapse_data`). We'll mount this to `/data` on the target container in order for the configuration file to be created.
|
|
||||||
|
|
||||||
The configuration file can be generated using Docker:
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run -it --rm \
|
|
||||||
-v synapse_data:/data
|
|
||||||
-e SYNAPSE_SERVER_NAME=example.com \
|
|
||||||
-e SYNAPSE_REPORT_STATS=yes \
|
|
||||||
matrixdotorg/synapse:latest generate
|
|
||||||
```
|
|
||||||
|
|
||||||
Once this completes, your `synapse_data` directory should contain a `homeserver.yaml` file. Feel free to read through this and check out the [documentation](https://github.com/matrix-org/synapse) for ways in which it can be modified.
|
|
||||||
|
|
||||||
## Run the homeserver
|
|
||||||
|
|
||||||
Finally, we can now run the homeserver. Depending on your reverse proxy setup (and whether you are containerising anything else), you may need to configure your Docker networks, but generally you can just execute the following to get your homeserver running:
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run -d \
|
|
||||||
-v synapse_data:/data
|
|
||||||
--name synapse \
|
|
||||||
--restart always \
|
|
||||||
matrixdotorg/synapse:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
If everything went well (and assuming your reverse proxy is also now up and running), you should be able to use your web browser to visit your Matrix domain (we used "example.com" above) and see a page that looks like this:
|
|
||||||
|
|
||||||
![Matrix homeserver confirmation page](/media/blog/matrix.png)
|
|
||||||
|
|
||||||
## Creating your user account
|
|
||||||
|
|
||||||
As long as your homeserver is configured to accept user registrations (via the `enable_registration` directive in `homeserver.yaml`), you should be able to [download a client](https://matrix.org/clients) (or use the [Element webapp](https://app.element.io)) and register your first user account.
|
|
||||||
|
|
||||||
Once logged-in you can join rooms, invite people, and begin communicating with others.
|
|
||||||
|
|
||||||
# Conclusion
|
|
||||||
|
|
||||||
This post aims to be a rough introduction to running your own Matrix homeserver. The Synapse software offers a variety of ways to tailor your instance, and so it is certainly worth becoming familiar with some of [the documentation](https://github.com/matrix-org/synapse) to ensure you have configured things the way you need.
|
|
||||||
|
|
||||||
If you want to get in touch then you can send me a message using Matrix (@wilw:matrix.wilw.dev) or on Mastodon ([@wilw@fosstodon.org](https://fosstodon.org/@wilw)).
|
|
@ -1,21 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-23T19:45:00Z"
|
|
||||||
title: "The Great Alone by Kristin Hannah"
|
|
||||||
description: "My thoughts on the book 'The Great Alone' by Kristin Hannah."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: the-great-alone
|
|
||||||
---
|
|
||||||
|
|
||||||
[_The Great Alone_](https://www.goodreads.com/book/show/34912895-the-great-alone) by [Kristin Hannah](https://www.goodreads.com/author/show/54493.Kristin_Hannah) is a book set out in the Alaskan wild. It tells the story of a young family that move in order to live off-the-grid after the father returns from being a prisoner of war in the Vietnam war.
|
|
||||||
|
|
||||||
![The Great Alone book cover](/media/blog/the_great_alone.jpg)
|
|
||||||
|
|
||||||
The book mostly focuses on the viewpoint of the daughter, Leni, who is thirteen years old when she moves with her mother and father. The story tells how Leni adapts and grows into her new Alaskan life over the years, whilst at the same time trying to navigate some of the perils at home in her family cabin. Leni and her family meet and grow close to different members of the local community, in which there are a variety of views regarding the types of people that should be allowed to come to Alaska.
|
|
||||||
|
|
||||||
The book certainly has its dark moments, and there is an ongoing sense of violence and intensity. At the same time, the author wonderfully describes the peacefulness of the environment, and the wildness of the Alaskan landscape, the wildlife, the weather, the sky, and the sea. It is clearly a place where humans and nature meet, and a place where - if people are to live off the land - they must learn and respect it and all it has to offer.
|
|
||||||
|
|
||||||
After all, in Alaska you can only ever make one mistake. The second one will kill you.
|
|
||||||
|
|
||||||
I loved the book and its intertwining themes of love, family drama (and more), forgiveness, wilderness, comradeship, and escapism. The author makes you feel frustrated with some of the decisions made by the characters in one moment, and the next you are cheering them on from behind the pages.
|
|
||||||
|
|
||||||
With everything that goes on in the story - the town and its community of interesting characters - it isn't always obvious where the title of the book comes from. However, as you progress further you realise that it's not just the landscape and geography that can evoke loneliness; the feeling can be more the result of the actions of others and having to keep secrets about what goes on behind closed doors.
|
|
@ -1,31 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-27T16:31:00Z"
|
|
||||||
title: "PinePhone and PineTime"
|
|
||||||
description: "Why I pre-ordered the PinePhone and a bit of talk about the PineTime, an open-source and hackable smartwatch."
|
|
||||||
tags: [100daystooffload, technology, pinephone, life]
|
|
||||||
slug: pinephone-pinetime
|
|
||||||
---
|
|
||||||
|
|
||||||
## Pre-ordering the PinePhone Beta
|
|
||||||
|
|
||||||
Earlier this week I ordered a [PinePhone](https://www.pine64.org/pinephone), which recently became [available as a Beta Edition](https://pine64.com/product-category/smartphones).
|
|
||||||
|
|
||||||
I've been excitedly following the progress of the PinePhone for some time now. I've joined various Matrix rooms, subscribed to [blogs](https://linmob.net), and started listening to the [PineTalk podcast](https://www.pine64.org/pinetalk). The phone is a hackable device that runs plain old Linux - not an Android variant - and thus helps users escape from the grasp of the Google and Apple ecosystems.
|
|
||||||
|
|
||||||
Other similar devices exist - such as the [Librem 5 from Purism](https://puri.sm/products/librem-5) - however the unopinionated nature of the PinePhone, and its cost ($150 compared to the Librem's $800), make the Pine64 offering much more attractive to me.
|
|
||||||
|
|
||||||
I understand that the phone and software are still under very active development, and I fully expect that the phone is not yet ready to become a daily driver. However I am excited to try it out, support the project, and contribute where I can. The potential of this movement is huge.
|
|
||||||
|
|
||||||
## Some thoughts on PineTime
|
|
||||||
|
|
||||||
Whilst researching the PinePhone, I stumbled across the [PineTime smartwatch](https://www.pine64.org/pinetime). This is a wearable device also from Pine64, which aims to offer an open-source and hackable system in a similar vein to the PinePhone.
|
|
||||||
|
|
||||||
Pine64 offers the device for purchase but fully acknowledges that it is not yet ready for daily use, and encourages interested people to instead purchase the [Development Kit](https://pine64.com/product/pinetime-dev-kit) so that they can learn more or contribute to the project.
|
|
||||||
|
|
||||||
The device aims to offer health tracking solutions (since it includes a step counter and heart rate detector) and notifications, and so the intention is for it to offer a similar experience to other smart watches - except with much more freedom.
|
|
||||||
|
|
||||||
The open and community-driven nature of the device could take it any number of ways.
|
|
||||||
|
|
||||||
> We envision the PineTime as a companion for not only your PinePhone but also for your favorite devices — any phone, tablet, or even PC - pine64.org/pinetime
|
|
||||||
|
|
||||||
This vision seems to embody the Pine64 philosophy that we see across all of their products. I'm not the right person to be able to contribute much to the project in its current stage (I don't have much experience with developing on embedded operating systems), but I look forward to seeing how it progresses and hopefully getting more involved slightly further down the line.
|
|
@ -1,69 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-03-31T22:00:00Z"
|
|
||||||
title: "The simplicity and flexibility of HTTP for APIs"
|
|
||||||
description: "Understanding RESTful HTTP web APIs, and some frustrations with non-compliant services."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
slug: http-simplicity
|
|
||||||
---
|
|
||||||
|
|
||||||
# Simple and RESTful HTTP APIs
|
|
||||||
|
|
||||||
The [HTTP standard](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) is an expressive system for network-based computer-computer interaction. It's a relatively old standard - it started life as HTTP/1.0 in 1996 and the HTTP/1.1 standard was formally specified in 1999. HTTP/2 (2015) introduced efficiencies around _how_ the data is transmitted between computers, and the still in-draft HTTP/3 builds further on these concepts.
|
|
||||||
|
|
||||||
I won't go into the nuts and bolts of it, but - essentially - for most applications and APIs, the developer-facing concepts haven't really changed since HTTP/1.1. By this version, we had all the useful methods required to build powerful and flexible APIs.
|
|
||||||
|
|
||||||
When writing a web service (e.g. a website or a web-based REST API), actions are based around _resources_. These are the "things" or "concepts" we are concerned with. For example, if one was to write a to-do list app, two of the concepts might be "to-do list" and "to-do list item". Generally, such an app might also maintain user accounts and so may have "user" and "session" resources, too, along with others if required.
|
|
||||||
|
|
||||||
In such a service, resources are usually indicated by a _path_. This is the bit that comes after the host name (e.g. `example.com`) in the URL and are usually mentioned in _plural_.
|
|
||||||
|
|
||||||
For example, in our to-do list example, a resource which indicates _all_ available lists might be given simply by `/lists`, and a specific list with ID `aaa-bbb-ccc` would be available at `/lists/aaa-bbb-ccc`.
|
|
||||||
|
|
||||||
This system allows the engineer to indicate _hierarchy_ or ownership in the data model. For example, to address all of the list items in a specific list one might use `/lists/aaa-bbb-ccc/items`. Then to access an item with ID `xxx-yyy-zzz` inside this list you'd use `/lists/aaa-bbb-ccc/items/xxx-yyy-zzz`. In many cases, for a simple web service of this type, this would be sufficient - it may not be appropriate to enable addressing a to-do list item directly without the context of its "parent" list.
|
|
||||||
|
|
||||||
Paths may sometimes include other metadata, such as the API version being called, but this can be simply included and described in the documentation.
|
|
||||||
|
|
||||||
In HTTP, _methods_ describe _what_ the API consumer wants to do with a resource. Some of the most widely used methods are `GET`, `POST`, `PUT`, `DELETE`, and `OPTIONS`. These methods are defined in the spec and some clients may handle requests differently based on the method being used. Unlike resources, you can't define your own methods to use. However, the flexibility provided as-is allows for most services to be built without requiring this level of customisation.
|
|
||||||
|
|
||||||
Some of these have pretty obvious semantic meaning. `POST` is typically used to create a new resource (e.g. "post a new to-do list item") and `PUT` is used to update an existing resource.
|
|
||||||
|
|
||||||
This means that, given the combination of our resource addressing and methods, we can express a powerful web service. Structuring your to-do list app using the system described here caters well to typical to-do list actions: creating lists and items (e.g. `POST /lists/aaa-bbb-ccc/items`), crossing-off items (probably `PUT /lists/aaa-bbb-ccc/items/xxx-yyy-zzz`), and retrieving, updating, and deleting things in a similar way using the appropriate methods.
|
|
||||||
|
|
||||||
HTTP request _headers_ can be used to provide authentication information, describe how the client wants information to be returned in the response, along with other ways to further annotate the request being made and to customise the expected response. Of course, the effectiveness of supplying these request headers depends on the server's own capability and configuration. However, the use of headers should certainly be considered by the engineer whilst planning and building out the service.
|
|
||||||
|
|
||||||
Using standards like these - resources, methods, and headers - in your APIs enables your users (_consumers_) to more easily learn and understand how to use your service. This saves them time, helps your service to grow, and means you'll spend less time dealing with support requests (unless your documentation is really good).
|
|
||||||
|
|
||||||
# Custom implementations
|
|
||||||
|
|
||||||
I think the above system is the most ideal, expressive, learnable and _expected_ way of building web services.
|
|
||||||
|
|
||||||
However, HTTP is flexible, and your server-side code can - in theory - do whatever you want it to, no matter what the request path, method, and headers are. But I don't really understand why one would want to.
|
|
||||||
|
|
||||||
I recently [migrated my photos collection to pCloud](/blog/2021/02/24/google-photos-pcloud), and wanted to explore their API to see if I could also use the service for programmatically backing-up other things, too.
|
|
||||||
|
|
||||||
Unfortunately I am unable to actually use their API, since I use two-factor authentication on pCloud and the API doesn't seem to work if this extra layer of security is in-place. However, whilst researching I discovered that pCloud's API is an example of a service that seems to defy the standards one is usually familiar with.
|
|
||||||
|
|
||||||
For example, it appears that it's perfectly acceptable to use `POST https://api.pcloud.com/deletefile?fileid=myfile` to delete a file or `GET https://api.pcloud.com/renamefolder?path=oldfolder&topath=newfolder` to rename a folder.
|
|
||||||
|
|
||||||
There's nothing _technically_ wrong with this implementation, especially given the fact that I'm sure it works. It perhaps makes it easier to route requests through to the correct internal functions. However it just feels _inelegant_ to me, and it seems to focus more on what's easier for them rather than their users.
|
|
||||||
|
|
||||||
The [page that lists file operations](https://docs.pcloud.com/methods/file) could instead show a couple of simple example paths and then rely on request _methods_ and parameters to describe available options.
|
|
||||||
|
|
||||||
I don't mean to pick on pCloud - the service itself is great and I'm sure the API works nicely. I plan to continue using the service via its web UI and official clients. I only bring it up because it seems odd to re-invent the wheel.
|
|
||||||
|
|
||||||
I'm completely on-board with the notion of discouraging system and process monopoly, but I don't think this is the same thing. The web is formed from a set of open standards that anyone can comment on or help contribute to.
|
|
||||||
|
|
||||||
# "Good" implementation examples
|
|
||||||
|
|
||||||
The web is full of services that expose sensible and learnable APIs.
|
|
||||||
|
|
||||||
An example I always love is the Stripe API - arguably a much more complex service than pCloud. However its [simple "compliant" API](https://stripe.com/docs/api/charges) makes credit card payments - and loads more - very easy to integrate with.
|
|
||||||
|
|
||||||
The [Spotify web API](https://developer.spotify.com/documentation/web-api/reference) also looks useful, though I haven't used that before myself.
|
|
||||||
|
|
||||||
# Beyond REST
|
|
||||||
|
|
||||||
REST has been a cornerstone of the web over the past couple of decades, and I think there is still very much a space for it - both for now and in the near future. Its flexibility has allowed it to remain useful across industries and settings - from small private IoT setups through to highly-secure enterprise-enterprise systems.
|
|
||||||
|
|
||||||
There are movements to begin using other technologies that may be better suited to the future of the web and communication - particularly as things continue to scale. Efforts such as [GraphQL](https://graphql.org), [Netflix's Falcor project](https://netflix.github.io/falcor), and even [RPC](https://en.wikipedia.org/wiki/Remote_procedure_call) provide alternatives for when REST isn't the most appropriate solution.
|
|
||||||
|
|
||||||
However, if building a web API that you want other people to use, and which would be well suited to REST, then I always think it's worth sticking to these HTTP standards as much as possible.
|
|
@ -1,80 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-04T11:10:00Z"
|
|
||||||
title: "From Apple Mail to Spark to Thunderbird"
|
|
||||||
description: "Three months: three mail clients. Some thoughts."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
slug: applemail-spark-thunderbird
|
|
||||||
---
|
|
||||||
|
|
||||||
Like many people, I own and manage multiple email accounts - for example, some are for work, for home, or for specific projects. I used to be a strong user of solely web-based email clients (such as Gmail or Fastmail's web apps) for each of my accounts. However the number of tabs I needed to keep open for all of this grew to the point where things became unmanageable - both in terms of needing to check multiple tabs several times per day and also frustrations when the browser would restart, or if I'd lose my tab setup for some other reason.
|
|
||||||
|
|
||||||
I needed a proper client, and although I knew that web-based software like [Roundcube](https://roundcube.net) and [Rainloop](https://www.rainloop.net) existed - which I could self-host - they just never felt stable or feature-ful enough.
|
|
||||||
|
|
||||||
This post is a short round-up of three mail clients I've been trying over the past few months.
|
|
||||||
|
|
||||||
## Apple Mail
|
|
||||||
|
|
||||||
For several years I've been an Apple Mail user on both my Mac and iPhone - mainly because it was the default on the devices I have but also because it's generally quite smooth and works reliably.
|
|
||||||
|
|
||||||
It's relatively painless to get setup, and the accounts sync well across the mail, contacts, and calendar apps. On Gmail (and other larger providers) there is an authentication "wizard" to help get the accounts setup. Fastmail allows you to [install profiles](https://www.fastmail.help/hc/en-us/articles/1500000279941-Set-up-iOS-devices-iOS-12) that automatically configure everything for you.
|
|
||||||
|
|
||||||
However, over time I began to find the interface a bit unintuitive. On iOS the general "Accounts" setting - which was useful as a single source of truth - seemed to disappear, and for some mailboxes it wouldn't let me add alias send-from addresses. I'm sure there was a reason for this, but I sometimes find that the iOS settings UIs overcomplicate things in their efforts for simplicity.
|
|
||||||
|
|
||||||
Whilst the Mac (currently) still has a dedicated Accounts setting in System Preferences, it also had other problems. Several times a day I'd frustratingly get my workflow interrupted by warnings of network problems.
|
|
||||||
|
|
||||||
![Apple Mail 'accounts offline' warning](/media/blog/apple-mail-issue.png)
|
|
||||||
|
|
||||||
I still think Apple Mail is a pretty decent app, but I thought that there must be something else out there that would work better, and be less frustrating, for me.
|
|
||||||
|
|
||||||
## Spark Mail
|
|
||||||
|
|
||||||
Back in February some of my colleagues recommended the [Spark mail app](https://sparkmailapp.com) from [Readdle](https://readdle.com). I've used some of Readdle's other software in the past (see [this post](/blog/2021/03/08/getting-mail), for example), and generally find it quite useful and intuitive. Like Apple Mail, it's also available for Mac and iOS.
|
|
||||||
|
|
||||||
Spark is free to get started (and I imagine most individuals would fit into their [free plan](https://sparkmailapp.com/pricing) long-term too). One of the features I immediately liked was that all of your mail accounts are tied to a single account. That means that if you get a new computer or phone, you don't need to go through the tedious business of setting up all the mail accounts again - just login with your main email and everything else gets pulled-through.
|
|
||||||
|
|
||||||
Email management is easy, search is lightning-fast, and the settings are useful.
|
|
||||||
|
|
||||||
Spark also comes bundled with a calendar that syncs well and automatically with services like Google Calendar and Fastmail Calendar. Like Apple Mail, there are dedicated setup wizards for email and calendar with the larger providers, and an option for manual entry for others. The calendar's event creator is nice, and also allows you to automatically schedule a video meeting.
|
|
||||||
|
|
||||||
![Spark calendar video meeting picker](/media/blog/spark-calendar-meeting.png)
|
|
||||||
|
|
||||||
One drawback is that there doesn't seem to be any way to view or manage contacts, and neither does it seem to integrate with the system contacts. I imagine it works directly with the relevant provider's contacts service.
|
|
||||||
|
|
||||||
Another frustration I had was in managing shared calendars. I think I'm a bit of a calendar power-user, however I imagine this must be affecting other people too. If someone else - who also shares their calendar with you - creates an event and invites you to it, there does not seem to be any way to select your own entry in order to interact with it (e.g. to accept or decline the invitation).
|
|
||||||
|
|
||||||
In the event below, if my calendar was the "green" one, for example, there is no way for me to select that in order to accept or decline. Again, I may be missing something but I've been trying to find a way for a while now without needing to "hide" my colleagues' calendars first.
|
|
||||||
|
|
||||||
![Spark calendar event with multiple attendees](/media/blog/spark-calendar-selection.png)
|
|
||||||
|
|
||||||
Then it comes to security. Whilst I "trust" Readdle - in that I imagine they have decent security practices in place - we know that even the most secure companies can become compromised. The account sync feature mentioned earlier is certainly useful, however this must mean that Readdle are storing the Gmail access keys or IMAP connection details on their own servers in a centralised location. Your email is the last thing you want to get compromised - since it is likely that this controls a number of your other online accounts - and so this risk is a bit of a concern.
|
|
||||||
|
|
||||||
Readdle [claim that](https://sparkmailapp.com/blog/privacy-explained) everything is encrypted at various levels but it still feels a little risky to me. Having the sync and push notifications is useful, and so it's up to the individual to choose what works best for them.
|
|
||||||
|
|
||||||
## Thunderbird
|
|
||||||
|
|
||||||
The last client I want to mention in this post is [Mozilla's Thunderbird](https://www.thunderbird.net). This is a bit of a re-visit for me, since this is the client I used consistently during my University years.
|
|
||||||
|
|
||||||
In honesty, the client doesn't seem to have changed a huge amount over the last decade, but then again - neither have the underlying email technologies themselves. It's an open-source client available on a number of operating systems - but not yet ([or ever?](https://support.mozilla.org/en-US/questions/990147)) for mobile.
|
|
||||||
|
|
||||||
Despite the slower development, I find Thunderbird to be a very powerful client. It has great support for email, calendar, and contacts straight out of the box. Things seem clearly organised, and account-management is super easy. There are no dedicated setups for Gmail, Outlook, etc., but it was able to automatically detect the relevant IMAP/SMTP servers for all of my accounts.
|
|
||||||
|
|
||||||
It's very unopinionated about ordering, views, threading, and much more - which allows you to set things up the way that works best for you. The interface doesn't try to be flashy or too clean and I find I am very productive when using it.
|
|
||||||
|
|
||||||
The calendar is easy to use and works with open standards like CalDAV.
|
|
||||||
|
|
||||||
It also has built in support for chat through systems like IRC and XMPP (if you use these types of things), and there's also a rich ecosystem of plugins to add extra functionality too. It's certainly the most flexible and powerful of the desktop mail apps I've used.
|
|
||||||
|
|
||||||
A few areas where it frustrates are around its performance. When adding a new account it proceeds to automatically download all of the mail headers for that account to be stored locally in its databases. This allows it to support searching and other local tasks, but the process causes the app to run slowly whilst it's in progress. If you change computer often, or have several machines to setup, then this could be a pain.
|
|
||||||
|
|
||||||
When opening large mail folders containing perhaps several hundreds of thousands of messages - for example my combined "Archive" folder - things get _very_ slow to the point where it is unusable. However, I don't really ever need to use these views so this isn't too much of a problem for me, but for some people this could be a blocker.
|
|
||||||
|
|
||||||
When compared to Apple Mail and Spark, the search function seems very slow. The results returned are quite accurate though, and the fact that results get shown in their own "tab" means that your flow isn't interrupted elsewhere in the app. This is a nice feature.
|
|
||||||
|
|
||||||
Generally, I love Thunderbird. Mozilla is renowned for being privacy-centric and the fact that everything is stored locally gives me more confidence about its security. Of course, it has drawbacks which will put some people off, but it's good to be supporting open-source software where possible.
|
|
||||||
|
|
||||||
## Conclusion
|
|
||||||
|
|
||||||
Like a web browser, I think people should be free to continue to try alternative mail clients as their needs and the software features change. The software mentioned above comprise just a small sample, and are focused largely around the Apple ecosystems since these are the devices I happen to be using at the moment.
|
|
||||||
|
|
||||||
Some others I'd like to try are [Airmail](https://airmailapp.com) and [Polymail](https://polymail.io). However, it'd be great to get some feedback on what other people are using. If you have any suggestions then please get in touch using Matrix (@wilw:matrix.wilw.dev) or on Mastodon ([fosstodon.org/@wilw](https://fosstodon.org/@wilw)).
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-07T19:44:00Z"
|
|
||||||
title: "Is Facebook scraping the Fediverse?"
|
|
||||||
description: "Is Facebook using the Fediverse to suggest posts to users?"
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
slug: is-facebook-scraping-fediverse
|
|
||||||
---
|
|
||||||
|
|
||||||
I don't use Facebook often. In fact, I only have an account currently because our company uses the "Login with Facebook" functionality in order to offer an additional single sign-on option for some customers.
|
|
||||||
|
|
||||||
I logged-in today as we needed to update some of the app's configuration on the Facebook Developer portal, and I went via the Facebook homepage feed to get there. A couple of "Suggested for you" posts that showed near the top of my feed were unusual and caught my eye.
|
|
||||||
|
|
||||||
![Facebook Suggested Post Tram picture](/media/blog/facebook_tram_1.png)
|
|
||||||
|
|
||||||
There wasn't just one. As I scrolled further, more and more showed up - all seemingly from the same user.
|
|
||||||
|
|
||||||
![Another Facebook Suggested Post Tram picture](/media/blog/facebook_tram_2.png)
|
|
||||||
|
|
||||||
![Yet another Facebook Suggested Post Tram picture](/media/blog/facebook_tram_3.png)
|
|
||||||
|
|
||||||
The page ("Nostalgia Vienna") doesn't seem to be selling anything in these posts, and I've never interacted with them before. I also don't have any content on Facebook and use browser plugins such as [Firefox Containers](https://addons.mozilla.org/en-US/firefox/addon/multi-account-containers), [Privacy Badger](https://addons.mozilla.org/en-US/firefox/addon/privacy-badger17), and others to try and prevent inadvertent data sharing with the social platform.
|
|
||||||
|
|
||||||
I know Facebook potentially has other ways of gathering user information, but I just simply don't have a big interest in Viennese trams (or trams in general). I don't really know why it is so keen to show me a new picture of a tram every few posts down the home feed.
|
|
||||||
|
|
||||||
I then realised that I recently [posted a picture to Pixelfed](https://pixelfed.social/p/wilw/267598377078886400) of a tram that I took on a trip to Basel a few years back.
|
|
||||||
|
|
||||||
![A screenshot of my tram photo from Pixelfed](/media/blog/pixelfed_tram.png)
|
|
||||||
|
|
||||||
My [Pixelfed account](https://pixelfed.social/wilw) is not explicitly tied to my own name or identity, but my bio there does contain a link to [my website](https://wilw.dev).
|
|
||||||
|
|
||||||
Interestingly, the styles and images of the Viennese trams suggested by Facebook are not a million miles away from my own post of the Swiss tram. The link feels tenuous but I can't think of anything else that might cause Facebook's algorithm to so strongly suggest this type of content to me.
|
|
||||||
|
|
||||||
I just wonder whether there is some clever scraping going on behind the scenes to further bolster Facebook's knowledge of its users.
|
|
@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-12T21:38:00Z"
|
|
||||||
title: "Six months of Invisalign"
|
|
||||||
description: "How my Invisalign treatment went: the process and my thoughts."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
slug: invisalign
|
|
||||||
---
|
|
||||||
|
|
||||||
Back in November I started an [Invisalign](https://www.invisalign.com) course to help straighten my teeth. Invisalign works like traditional braces, but is instead formed from transparent teeth "trays" that others can only really notice up-close. Given my personal situation, this seemed like a better approach than the traditional metal braces.
|
|
||||||
|
|
||||||
![My Invisalign goodie bags](/media/blog/invisalign.png)
|
|
||||||
|
|
||||||
In all honesty, my teeth weren't that bad to begin with but - like many people - as I got older I was beginning to notice a little more "crowding" (where teeth bunch together and start to move out of place). Invisalign was something I had wanted to try for a while, and whilst the UK was in lockdown and I couldn't see anyone anyway, it felt like a good time to go ahead with it.
|
|
||||||
|
|
||||||
# The process
|
|
||||||
|
|
||||||
I had a couple of initial appointments with my dentist just to ensure I was dentally fit for orthodontic work and in order to take a scan of my teeth. The scan was cool - it showed exactly what my teeth looked like and the software then uses the result to design a series of aligners that would bring the teeth back into line. I also got access to a website on which I could see how my teeth would be moving over time.
|
|
||||||
|
|
||||||
After my scans, I went back to the dentist a couple of weeks later in order for some attachments to be added to my teeth and to collect my newly-manufactured aligners. In total I was given 22 sets of aligners, with the aim being to start with set number 1 and then proceed to the next one each week - every change in aligner gradually moving the teeth into line.
|
|
||||||
|
|
||||||
I was also given a scanbox, into which I could place my phone in order to submit photos of my teeth every week through an app to my dentist. This enabled him to track the progress each week and to ensure I moved onto the next aligner set at the right time.
|
|
||||||
|
|
||||||
For the next two-three months I wore my aligners for 22 hours each day. Every week, I scanned my teeth and was instructed to move onto the next set of aligners in order to progress the treatment. In February I had to go back to visit the dentist in order to have some additional filing between some of my teeth so they could move into position properly.
|
|
||||||
|
|
||||||
I then continued for another few months - until today. I completed my last set of aligners last week and had a check-up this afternoon to see how things went. I was pleased with the result, and we agreed that no more movement was needed. My dentist removed the attachments from my teeth and we ordered the retainers, which I will need to continue to wear full-time for a few months and then beyond that just at night - in order to ensure things stay in place.
|
|
||||||
|
|
||||||
# My thoughts
|
|
||||||
|
|
||||||
In general, the process was super easy. For the first few days of the treatment I didn't think I would be able to keep it up for six months - the aligners felt pretty uncomfortable and could be a little painful for a couple of days every time I switched to a new set. Also, the extra work needed when brushing my teeth and having to remove the aligners between meals seemed inconvenient.
|
|
||||||
|
|
||||||
However, after a few weeks it all became second nature. It now feels weird when I don't have them in!
|
|
||||||
|
|
||||||
The treatment is also quite expensive. However, it is cheaper (I think?) than traditional braces, it's a shorter treatment period, and I preferred to have the almost-invisible aligners rather than metal braces in front of my teeth.
|
|
||||||
|
|
||||||
In addition, given that the sets of post-treatment retainers included in the treatment plan last for years, it feels like the treatment is a "one off" (🤞) - as long as I keep wearing the retainers properly then the teeth should now stay in place.
|
|
||||||
|
|
||||||
All in all, it was (and is still) a good experience and I am glad to have done it.
|
|
@ -1,77 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-17T14:00:00Z"
|
|
||||||
title: "Reporting business accounts using Ledger"
|
|
||||||
description: "Why I switched from Xero to Ledger, and how I could still report business accounts."
|
|
||||||
tags: [100daystooffload, finance, technology, ledger]
|
|
||||||
slug: business-accounts-ledger
|
|
||||||
---
|
|
||||||
|
|
||||||
As is the case with many countries, all businesses in the UK must report the state of their financial accounts to the relevant inland revenue service at their year-end (in the UK, this is [HMRC](https://www.gov.uk/government/organisations/hm-revenue-customs)).
|
|
||||||
|
|
||||||
This is also the case if you are a freelancer or sole-trader (or if you've made other untaxed income - e.g. from investments). In these cases, this is called your [Self Assessment](https://www.gov.uk/self-assessment-tax-returns). Self Assessments are pretty straightforward, and can usually be completed online by the individual themself - as long as they have kept good accounts and know their numbers.
|
|
||||||
|
|
||||||
However, the required year-end _business_ accounts are different and are more complex in order to account for all the different types of operating models and variety of business types. There are also various rules for businesses of different sizes and if you don't know what you're doing you may end up paying too much or too little tax.
|
|
||||||
|
|
||||||
As such, it is generally advisable to appoint an accountant to help you at your year-end (even if you're making a loss!). It gives you peace of mind and also saves you time.
|
|
||||||
|
|
||||||
My business year-end passed recently. Historically I've used [Xero](https://xero.com) to track business finances - since it is a good one-stop shop for issuing invoices, getting paid, tracking bills and finances, and automatically reconciling against your bank. It's a great tool for small businesses as it helps you make sure everything is correctly accounted for, and it allows your accountant to easily get the information they need in order to make their reports for your business to HMRC.
|
|
||||||
|
|
||||||
However, it is a paid-for service, and if you've paused trading at least temporarily - like me - or if you're going through a financial dry patch, it feels a waste to pay for something that you're not using.
|
|
||||||
|
|
||||||
About a year ago I got quite heavily into [plain-text accounting](/notes/plain-text-accounting) - it feels logical and in-control. I was using it for some of my personal finances and so I thought I'd also switch business bookkeeping to the [Ledger](https://www.ledger-cli.org) approach too.
|
|
||||||
|
|
||||||
I exported my Xero accounts into a new Ledger file and paused my Xero subscription. Every month I would run through my bank statement/invoices/bills, and update the ledger and reconcile against the business bank account. As such, when it came round to year-end, I had a full set of books for the relevant tax period.
|
|
||||||
|
|
||||||
This is where I worried a little. The lady who normally files my accounts had access to my Xero and can run everything from there (many small business accountants in the UK recommend and sometimes only work with Xero). I didn't want to have to look for and begin working with a new accountant, and so I looked to see if I could get Ledger to output balance sheets and P&Ls in a similar way to Xero.
|
|
||||||
|
|
||||||
The Ledger tool offers a number of reporting mechanisms. The most useful are perhaps the `balance` and `register` commands, which respectively show the balance across your accounts and a transaction log.
|
|
||||||
|
|
||||||
After running a few of these simple Ledger commands, I had the files I needed: a balance sheet (covering all accounts), a profit & loss account (essentially a balance sheet covering income and expense accounts), and a transaction register. Examples describing how I generated these are shown below (in this case assuming a year-end of 31st December).
|
|
||||||
|
|
||||||
**Balance sheet:** To generate the balance sheet I used `ledger balance -b 2020/01/01 -e 2021/01/01`, which outputs something along the lines of:
|
|
||||||
|
|
||||||
```
|
|
||||||
£-XXXX.XX Assets
|
|
||||||
£XXXX.XX Bank 1
|
|
||||||
£-XXXX.XX Bank 2
|
|
||||||
£XXXX.XX Equity:Shareholder:Dividends
|
|
||||||
£XXXX.XX Expenses
|
|
||||||
£XXX.XX Advertising
|
|
||||||
£XX.XX Compliance
|
|
||||||
£XX.XX Domains
|
|
||||||
£XXX.XX Hosting
|
|
||||||
£XXX.XX Services
|
|
||||||
£XXX.XX Accounting
|
|
||||||
£X.XX Banking
|
|
||||||
£XX.XX Legal
|
|
||||||
£XXX.XX Software
|
|
||||||
£XXXX.XX Tax:Corporation
|
|
||||||
£-XXX.XX Income:Sales:Product
|
|
||||||
--------------------
|
|
||||||
0
|
|
||||||
```
|
|
||||||
|
|
||||||
**Profit & loss account:** The rough "P&L" was generated with `ledger balance -b 2020/01/01 -e 2021/01/01 income expenses`:
|
|
||||||
|
|
||||||
```
|
|
||||||
£XXXX.XX Expenses
|
|
||||||
£XXX.XX Advertising
|
|
||||||
£XX.XX Compliance
|
|
||||||
£XX.XX Domains
|
|
||||||
£XXX.XX Hosting
|
|
||||||
£XXX.XX Services
|
|
||||||
£XXX.XX Accounting
|
|
||||||
£X.XX Banking
|
|
||||||
£XX.XX Legal
|
|
||||||
£XXX.XX Software
|
|
||||||
£XXXX.XX Tax:Corporation
|
|
||||||
£-XXX.XX Income:Sales:Product
|
|
||||||
--------------------
|
|
||||||
£XXXX.XX
|
|
||||||
```
|
|
||||||
|
|
||||||
(Where the final line indicates the overall balance between income and expenses).
|
|
||||||
|
|
||||||
**Transaction log:** The register was generated using `ledger register -b 2020/01/01 -e 2021/01/01`. I won't include a sample below, as a transaction log is mostly obvious. I also generated it in CSV format in case this made it easier for the accountant at all: `ledger csv -b 2020/01/01 -e 2021/01/01`.
|
|
||||||
|
|
||||||
I placed the outputs from these commands into separate files and sent them to the accountant, who was then able to submit the company accounts without needing Xero. This was a great experience, as it gives me confidence in the end-to-end functionality of Ledger (and other similar command-line accounting tools). Writing and keeping books using plain-text files is quicker than Xero (which can be quite clunky), and now I can also easily get the information out the other end too. And it's free!
|
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-18T12:09:00Z"
|
|
||||||
title: "The Giver of Stars by Jojo Moyes"
|
|
||||||
description: "Some thoughts on the book 'The Giver of Stars' by Jojo Moyes."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: giver-of-stars
|
|
||||||
---
|
|
||||||
|
|
||||||
[The Giver of Stars](https://www.goodreads.com/book/show/43925876-the-giver-of-stars) by [Jojo Moyes](https://www.goodreads.com/author/show/281810.Jojo_Moyes) tells the story of a young English woman - Alice - who marries an American man and moves to a small town in Kentucky in the late 1930s.
|
|
||||||
|
|
||||||
![The Giver of Stars book cover](/media/blog/giver_of_stars.jpg)
|
|
||||||
|
|
||||||
Not long after arriving in Kentucky Alice realises she may have made a mistake when it comes to her new husband. However, the real story focuses around a job Alice gets working with the local library.
|
|
||||||
|
|
||||||
The library begins offering a new service, in which the (female) librarians travel around the local area (often hard to traverse due to mountainous terrain) on horseback to deliver books to those unable to get to town or who wouldn't usually engage with the library. The concept is based on a real project - the [Pack Horse Library Project](https://en.wikipedia.org/wiki/Pack_Horse_Library_Project) - and Alice and the other women are met with many different types of personalities on their rounds.
|
|
||||||
|
|
||||||
There are focuses on racism, sexism, misogyny, domestic abuse, murder, and much more in the story, and the librarians are faced with a number of hugely difficult situations both when at work and when at home.
|
|
||||||
|
|
||||||
The story was fantastic and engaging. I enjoyed the scene-setting, and could easily picture the local town and all the surrounding countryside. You feel an undeniable sense of unfairness in the world as the story progresses - in which rich white men nearly always get their own way in most situations - however the bond that builds between the characters, and their shared experiences, show that this can be overcome.
|
|
@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-25T16:41:00Z"
|
|
||||||
title: "Steve Jobs by Walter Isaacson"
|
|
||||||
description: "Some thoughts on the Steve Jobs biography by Walter Isaacson."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: steve-jobs
|
|
||||||
---
|
|
||||||
|
|
||||||
I was recently asked whether Steve Jobs was someone that inspired me. It's a difficult question, I find; he's definitely an inspiring person in the sense of his work ethic, the products he envisages, and his way of understanding the needs of the target customer better than they know it themselves.
|
|
||||||
|
|
||||||
As a person, however, I find his personality and the way he treats others less inspiring. I try to be empathetic to others and take into account the emotional and psychological position of someone else when interacting with them. In a professional workplace this (hopefully) contributes towards creating a space that enables people to grow and develop whilst also emboldening colleagues to put forward their own thoughts and opinions in a more risk-free environment.
|
|
||||||
|
|
||||||
Jobs, on the other hand, has his own vision and - although these visions, if executed, are bound to be successful - you'll need to be on _his_ train in order to succeed in working with him.
|
|
||||||
|
|
||||||
![Steve Jobs biography book cover](/media/blog/steve_jobs.jpg)
|
|
||||||
|
|
||||||
The reason my colleague asked me this question was because I was reading the [Steve Jobs biography](https://www.goodreads.com/book/show/11084145-steve-jobs) by [Walter Isaacson](https://www.goodreads.com/author/show/7111.Walter_Isaacson) at the time. The biography's subject is not a hero of mine in any way, but he is indisputably a legend in the consumer technology space and so his story definitely deserves knowing (whatever your particular stance is).
|
|
||||||
|
|
||||||
Although I knew the rough story of his life - his co-founding of Apple with Steve Wozniak, his time and successes at Pixar, his founding of NeXT before his subsequent return to Apple and eventual battle with cancer - understanding how individual products came to be imagined and created was fascinating.
|
|
||||||
|
|
||||||
His relationships with others - friends, colleagues, competitors, and romances - undoubtedly helped shape his life and his successes. His obsessions over food, art (and the appearance of products, both outside and within) and his focus on work right to the end were certainly areas I did not know about, but it's clear that these all contribute towards what he managed to achieve.
|
|
||||||
|
|
||||||
I know that a lot of people don't like Jobs, or don't agree with the type of closed end-to-end technology he pioneered and obsessed over (myself included), however his achievements - even by the age of 30 - and his focus on the end goal should definitely be an inspiration to all technologists.
|
|
@ -1,11 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-26T10:48:00Z"
|
|
||||||
title: "My appearance in the Wales \"35 Under 35\""
|
|
||||||
description: "Back in December I was lucky enough to be included in the Wales '35 Under 35'."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
slug: 35-under-35
|
|
||||||
---
|
|
||||||
|
|
||||||
This is a bit of a vanity post, but back in December I was lucky enough to be included in the 2020 [WalesOnline "35 Under 35"](https://www.walesonline.co.uk/news/wales-news/walesonline-35-under-35-top-19351410).
|
|
||||||
|
|
||||||
This list aims to present the "best young businessmen in Wales" for the year. It was definitely an honour to be included and it's great to see the efforts from the whole team at [Simply Do](https://www.simplydo.co.uk) reflected. We're still only at the beginning of our journey and so we have an exciting few years ahead!
|
|
@ -1,55 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-04-27T21:00:00Z"
|
|
||||||
title: "Starting out with the Pinephone"
|
|
||||||
description: "My initial plans with the Pinephone."
|
|
||||||
tags: [100daystooffload, technology, pinephone]
|
|
||||||
slug: pinephone
|
|
||||||
---
|
|
||||||
|
|
||||||
As you may know, I [recently purchased the beta edition of the Pinephone](/blog/2021/03/27/pinephone-pinetime). It arrived last week in the _Pinephone Beta Edition_ box shown below.
|
|
||||||
|
|
||||||
![Pinephone beta box](/media/blog/pinephone.jpg)
|
|
||||||
|
|
||||||
As mentioned in my previous post on the subject, I bought the phone for purely experimental purposes, to get involved in the community, and to be a part of the freedom and Linux-on-phone movement.
|
|
||||||
|
|
||||||
I fully understand that the device is not yet really considered ready for every-day reliable production use (especially when compared to my current iPhone 11 Pro Max). However, [the Pinephone](https://wiki.pine64.org/index.php/PinePhone) is less than 20% of the price of my iPhone, and comes with the freedom to do so much more - without the restrictions of Apple's "walled garden".
|
|
||||||
|
|
||||||
However, I am very excited to see what _can_ be done with it. At the end of the day, it's just an ARM-based _computer_ with support for running mainline Linux and the added benefit of having cellular capabilities to make phone calls and handle data connections.
|
|
||||||
|
|
||||||
It's also super easy to try out different [operating systems](https://wiki.pine64.org/wiki/PinePhone_Software_Releases) by simply `dd`-ing to an SD card - much easier than the tedious root-recovery-flash song and dance often required in the Android ecosystem.
|
|
||||||
|
|
||||||
# The next few weeks
|
|
||||||
|
|
||||||
Anyway, I'm going a little off-topic. My initial plans aren't to try out new operating systems just yet (although I am excited to try). Instead, I'd like to spend the first few weeks tinkering with the out-of-the (beta) box underlying system and seeing how well it _does_ handle my day-to-day tasks on an as-is (i.e. without needing to change SD card) basis.
|
|
||||||
|
|
||||||
The beta edition comes pre-installed with [Manjaro Linux](https://manjaro.org) on the eMMC along with the [KDE Plasma Mobile](https://www.plasma-mobile.org) desktop environment, so this is what I'll stick with for now. Upon initial boot-up I can already see that it comes pre-installed with some useful packages (e.g. Telegram messenger and the [Megapixels camera application](https://git.sr.ht/~martijnbraam/megapixels)).
|
|
||||||
|
|
||||||
However, below is a list of day-to-day tasks I can do on my current phone and which I will try and accomplish using the device over the next few weeks.
|
|
||||||
|
|
||||||
- Basic calls and texts.
|
|
||||||
- 4G cellular data connectivity.
|
|
||||||
- WiFi connectivity.
|
|
||||||
- Bluetooth connectivity (including headphones).
|
|
||||||
- Photo- and video-taking using both front- and rear-facing cameras.
|
|
||||||
- Web browsing.
|
|
||||||
- Podcast subscribing, listing, and listening.
|
|
||||||
- Audiobook downloading and listening.
|
|
||||||
- Music-playing (preferably through Spotify).
|
|
||||||
- Mastodon (tooting and reading my timelines).
|
|
||||||
- Twitter.
|
|
||||||
- RSS (viewing my feeds from my FreshRSS server).
|
|
||||||
- Email reading and sending.
|
|
||||||
- Telegram messaging.
|
|
||||||
- Password-management.
|
|
||||||
|
|
||||||
I've purposefully kept a couple of things off this list - including Whatsapp, my bank's app, and some enterprise apps I use for work - since these systems are proprietary in nature and so would not be fair to expect of the phone. One could argue that this impacts its viability as a daily-driver, however that is not my current goal. Presently I am just looking to see how well some basic tasks can be accomplished before trying to take it further to be fully useful for daily use.
|
|
||||||
|
|
||||||
I also want to document the journey for myself and others wanting to get involved in this project.
|
|
||||||
|
|
||||||
Projects like [Anbox](https://linmob.net/2020/08/15/anbox-on-the-pinephone.html) look like potential routes for getting additional things working when needed in a pinch. However I'll save that for another time.
|
|
||||||
|
|
||||||
# Next
|
|
||||||
|
|
||||||
Check back in a few weeks to see how I get on. If you have any advice for starting out in this way then please let me know!
|
|
||||||
|
|
||||||
After this initial period I will look to try out other shells and underlying systems. The [ARM Arch with Phosh project](https://github.com/dreemurrs-embedded/Pine64-Arch) looks like a good start point for when I come to this.
|
|
@ -1,25 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-04T18:44:00Z"
|
|
||||||
title: "Go Time"
|
|
||||||
description: "Some thoughts on the Go Time podcast."
|
|
||||||
tags: [100daystooffload, technology, podcast]
|
|
||||||
slug: gotime
|
|
||||||
---
|
|
||||||
|
|
||||||
I listen to a number of podcasts each week. One of these is [Go Time](https://changelog.com/gotime).
|
|
||||||
|
|
||||||
![Go Time logo](/media/blog/go-time.png)
|
|
||||||
|
|
||||||
The Go Time podcast releases episodes every Thursday. Its format is mostly comprised of panel discussions and interviews with founders and specialists in the community about the [Go programming language](https://golang.org). Episodes are usually between 60 and 90 minutes long.
|
|
||||||
|
|
||||||
I don't program in Go a lot myself these days, though do have one or two older [projects](/projects) written in the language. However, I feel that the content is often broadly relevant for non-full-time gophers - like myself - also.
|
|
||||||
|
|
||||||
The episodes include discussions around a diverse variety of topics - such as testing, networking, web-apps, tooling, startups, programming principles, and much more. Many of these concepts are interesting to gophers and non-gophers alike, as they touch on the broader problems as well as discussing how Go can specifically be used to solve these problems.
|
|
||||||
|
|
||||||
Recently I have started using the [Rust language](https://www.rust-lang.org) more and more, and particularly on [this side project](https://git.wilw.dev/wilw/capsule-town) which I have used as a mechanism for learning the ins-and-outs. Although the two languages (Go and Rust) are by no means the same, they do share a small number of similar attributes and I have found that the Go Time podcast has often touched on topics relevant to both languages.
|
|
||||||
|
|
||||||
Episodes also feature interesting guests from a variety of backgrounds - from specialists in the community through to startup founders. Hearing their stories is always great. Additionally, the show hosts are engaging and add light-heartedness to what can be deep technical conversations.
|
|
||||||
|
|
||||||
If you're a programmer, and even if not a gopher yourself, I recommend checking out a few of the episodes to see if you agree.
|
|
||||||
|
|
||||||
It should come up in your podcast app if you search for "Go Time". I use [Overcast](https://overcast.fm) on iOS, and if you do also you can subscribe at [this link](https://overcast.fm/itunes1120964487/go-time).
|
|
@ -1,20 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-05T19:50:00Z"
|
|
||||||
title: "Data Sovereignty"
|
|
||||||
description: "Brief thoughts on the meanings of 'data sovereignty'."
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
---
|
|
||||||
|
|
||||||
The term 'data sovereignty' is something we hear much more about these days. Increasingly I've also heard it being mentioned in different contexts.
|
|
||||||
|
|
||||||
We've seen it more in the world of enterprise SaaS; particularly in the case of UK-based public sector organisations amid post-Brexit data flow policies. More and more organisations are getting stricter in the geographic location of their users' data. Whereas before most organisations would be happy as long as the data is stored somewhere within the EU, they would now require it to be stored onshore within the UK.
|
|
||||||
|
|
||||||
They call this _data sovereignty_. At our company we're lucky to be agile enough to adapt and change our service offering to enable UK-only data processing and storage. However I can imagine many larger organisations might experience more inertia. Interestingly though, finding a UK-only mail provider isn't as easy as it sounds - most such services offer "EU" or "US" servers, but stop there (potential SaaS service offering there: UK-based mail provider).
|
|
||||||
|
|
||||||
The other place I've been hearing the term is in the indie tech and self-hosted community. In this case the data sovereignty concept relates more to data _ownership_, where the individual maintains control over their own data, where it is stored, how it is processed, and often goes as far as to keep their own data at home (for example, self-hosted setups using home servers).
|
|
||||||
|
|
||||||
I'm definitely in this camp too; whilst I don't keep stuff stored at home, I do keep my own data - when possible - on private servers in a secure datacentre or on services I trust. Things just feel more in-control with this approach.
|
|
||||||
|
|
||||||
Without data sovereignty, people are at risk of losing data they think they "own". For example, someone [recently lost access to their iCloud data](https://dcurt.is/apple-card-can-disable-your-icloud-account) because of issues with an unrelated service.
|
|
||||||
|
|
||||||
There's not much more to this post. I just think it's interesting that we're hearing more and more of the same phrase being used in different contexts by different groups of people and organisations.
|
|
@ -1,65 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-09T21:31:00Z"
|
|
||||||
title: "Self-hosted notes and to-do lists"
|
|
||||||
description: "How I keep notes and to-do lists."
|
|
||||||
tags: [100daystooffload, technology, selfhost]
|
|
||||||
slug: notes-todos
|
|
||||||
---
|
|
||||||
|
|
||||||
In this post I will talk a little about how I handle my digital notes and to-do lists. In the spirit of my last post on [data sovereignty](/blog/2021/05/05/data-sovereignty), the focus will be on self-hosted approaches.
|
|
||||||
|
|
||||||
## To-do list management
|
|
||||||
|
|
||||||
It feels odd that the first task many new technical frameworks guide users through, by way of a tutorial, is a simple to-do list; yet finding great production-ready examples of such software can be challenging.
|
|
||||||
|
|
||||||
It's a pretty personal space. Although there are awesome time management processes out there (such as the [pomodoro technique](https://en.wikipedia.org/wiki/Pomodoro_Technique)), at the end of the day everyone is unique and what works for one person doesn't necessarily work for others.
|
|
||||||
|
|
||||||
A few years ago I got quite heavily into [Todoist](https://todoist.com). It's a very feature-rich platform with great apps across web, desktop, and mobile. It supports tagging, projects, deadlines, sub-tasks, and much more.
|
|
||||||
|
|
||||||
However, it's almost _too_ feature rich, and I find this can distract from the intended simplicity of to-do lists. Whilst it's important to set up a process that allows you to work effectively, spending too long configuring and reconfiguring things is counter-productive.
|
|
||||||
|
|
||||||
It also means that your data is held elsewhere and out of your control. A better solution might be one that you can keep local and sync or self-host.
|
|
||||||
|
|
||||||
There are [a few examples of open-source to-do list alternatives](https://github.com/awesome-selfhosted/awesome-selfhosted#task-managementto-do-lists) that you can self-host. The one I use is [Taskwarrior](https://taskwarrior.org).
|
|
||||||
|
|
||||||
> Taskwarrior is Free and Open Source Software that manages your TODO list from the command line. It is flexible, fast, and unobtrusive. It does its job then gets out of your way. - _taskwarrior.org_
|
|
||||||
|
|
||||||
I like Taskwarrior for many reasons. But mainly it's the speed and clarity of use - it really does just "get out of your way". Tags and projects are created automatically for you as you go, and querying feels as fast and sensible as [Ledger](https://www.ledger-cli.org) is for accounts.
|
|
||||||
|
|
||||||
I have my terminal open all of the time anyway, and so I can quickly and at any time view my current list (by running `task`), and see my currently in-play tasks listed right at the top.
|
|
||||||
|
|
||||||
I can also query by tag (`task ls +tagname`), or to-dos for a specific project (`task ls project:projectname`).
|
|
||||||
|
|
||||||
Adding todos is also just as easy, and arguably quicker than commercial offerings like Todoist. E.g. if I wanted to add a new task to buy that gift for my friend (and tag it as "life"), I can just run `task add +life Buy gift for Sam` and then forget about the task for now. I can then check out my "life" todos (`task ls +life`) at a time when I'm out of work and have time to actually complete such tasks.
|
|
||||||
|
|
||||||
I'm on a Mac, and so I just used `brew install task` to install it. There is likely a [package for your own distribution](https://taskwarrior.org/download) too.
|
|
||||||
|
|
||||||
In terms of self-hosting for multi-device setups, there is the [Taskserver](https://github.com/GothenburgBitFactory/taskserver) project from the same developers, and this is the recommended approach. For me, however, I only use Taskwarrior on one device and so I backup my tasks by simply syncing them to my Nextcloud. To do so, I just edited the relevant line in `~/.taskrc`:
|
|
||||||
|
|
||||||
```
|
|
||||||
...
|
|
||||||
data.location=~/Nextcloud/tasks
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
There is much more you can do with Taskwarrior, should you wish (including things like theming and task prioritising). I can certainly recommend taking a look through [the documentation](https://taskwarrior.org/docs) for more information.
|
|
||||||
|
|
||||||
## Notes (and notebooks)
|
|
||||||
|
|
||||||
Sometimes you just can't beat an old fashioned pen-and-paper notebook. The process of physically writing things down definitely seems to have a psychological effect on my ability to remember things. However, this approach isn't really compatible with my other information-oriented habits (particularly backup paranoia and [minimalism](/blog/2021/03/08/getting-mail)).
|
|
||||||
|
|
||||||
The same concepts around organising notes into notebooks, and keeping things logically organised, can still be applied to digital note-taking too.
|
|
||||||
|
|
||||||
There are a number of free and commercial offerings that exist. [Simplenote](https://simplenote.com) is great (though perhaps a little _too_ simple). For Apple users, [Bear](https://bear.app) is also good, but potentially locks you into the Apple ecosystem.
|
|
||||||
|
|
||||||
For some time I've used [Obsidian](https://obsidian.md). I like Obsidian as it just uses your filesystem as a way of organising notes (directories are "notebooks" and each note is a simple markdown file). This approach also makes syncing over Nextcloud super easy (just set your Obsidian vault to a directory in your local Nextcloud sync folder, and away you go). There is also a mobile app that's currently in closed beta.
|
|
||||||
|
|
||||||
Recently I've been trying to get more into [Joplin](https://joplinapp.org). I like this software because it is open-source, has a terminal interface as well as GUI ones, and has mobile apps available for note-taking on-the-go.
|
|
||||||
|
|
||||||
Joplin also has [native sync-ability with Nextcloud](https://joplinapp.org/#nextcloud-synchronisation), which is useful for backup and cross-device access. I find searching quick and intuitive, and the note editor uses Vim (by default, at least), which is great for easy editing.
|
|
||||||
|
|
||||||
All in all, I still teeter on the edge between Obsidian and Joplin - both are great options and are worth exploring for your own use.
|
|
||||||
|
|
||||||
## Open to ideas
|
|
||||||
|
|
||||||
I'm definitely open to other ideas for both note-taking and to-do list management. If you have any good examples of software to help with either of these then please get in touch!
|
|
@ -1,64 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-12T19:20:00Z"
|
|
||||||
title: "Running"
|
|
||||||
description: "Workouts, adopting a dog, and getting back into running."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
---
|
|
||||||
|
|
||||||
## The effects of working from home
|
|
||||||
|
|
||||||
The UK went into its first proper COVID-induced lockdown back around March time last year. At this time, our company locked its office doors and we all began working from home. We're now all still working remotely about 14 months later and will continue to do so for the foreseeable future.
|
|
||||||
|
|
||||||
Before we closed the office, I used to walk across my city - Cardiff - to get to work. It's about a 3km walk, which would take me about 30 minutes to walk each way. I enjoyed the walk - I could stop for coffee on the way through, and the distance meant I could take different routes on different days if I wanted a change of scene.
|
|
||||||
|
|
||||||
![Walking through Cardiff](/media/blog/running1.jpg)
|
|
||||||
|
|
||||||
Now, and since last March, my daily commute simply involves me walking down the stairs to the corner of my living room that is my home "office". Whilst it is definitely convenient (and I would certainly prefer this to full-time back in an office), it had its downsides.
|
|
||||||
|
|
||||||
For the first few weeks, I just felt _lazy_. I was working hard (we all were, and were performing great as a remote team), but my body almost craved that morning walk. The walk was time that enabled my mind to sort itself out ready for the day of work, meetings, decisions, and everything else.
|
|
||||||
|
|
||||||
Without that walk time I felt my work starts were slower, and I was more easily distracted in the mornings. To try and alleviate this a little, I began walking around a park near me each evening after work - this definitely helped me wind down and the effects lasted until the following day.
|
|
||||||
|
|
||||||
## 🏋️♂️ Workouts
|
|
||||||
|
|
||||||
Around the same time I began working from home full-time, my brother told me about an app - [Fitbod](https://fitbod.me) - that aims to be like a mini personal trainer. It's not the only app of its type around, but it caught me at the right time.
|
|
||||||
|
|
||||||
I thought that having an additional exercise goal each day - as well as the evening walk - would help in making me feel more invigorated. I began using it in the afternoons after I had finished my main work for the day (before my walk).
|
|
||||||
|
|
||||||
Daily workouts, just simple ones at home following the app's instructions, definitely had a positive effect on my mental wellbeing - it felt almost like personal meditation time for me.
|
|
||||||
|
|
||||||
It wasn't long before I switched the routine to morning workouts (before work or after my first meetings of the day). This definitely helped my work too. I've been doing the same thing ever since (I think I've only missed 10 or so days of workouts in total for the whole of the last year).
|
|
||||||
|
|
||||||
## 🐶 Adopting a dog
|
|
||||||
|
|
||||||
In early December we adopted a dog, and this flipped things on their head a bit. Suddenly control over my own life changed slightly, as I now had someone to be responsible for and think of - at many times before myself. I'll write more about my dog in a later post, but will move on back to exercise for now.
|
|
||||||
|
|
||||||
Since getting the dog, I no longer had times for nice leisurely walks after work or workouts in the morning. I now had a new member of the family that needed to be walked once or twice a day, and entertained during the times at home.
|
|
||||||
|
|
||||||
People who tell you that you do more exercise when you have a dog are lying. When "walking" him, my time is spent mostly standing around whilst he runs and plays with his friends in the park. It's the only way he can get real exercise - walking him on a lead on my usual walk (especially a dog with as much energy as mine!) just does not give him the exercise he needs.
|
|
||||||
|
|
||||||
I wanted to find a way to maintain my level of exercise whilst also giving me the time to go to the parks for 1-2 hours each day to allow my dog to run around properly off the lead. (Note: we live in a city, and it's not very convenient to have to drive out to countryside trails every day).
|
|
||||||
|
|
||||||
## 🏃♂️ (Re-)starting to run
|
|
||||||
|
|
||||||
Some of the people I met in my local dog-walking friend group are quite heavily into running. I used to love running in my mid-20s, and would jog 15km or so three times a week. I was put off as had been told by some people that it can have long-lasting damage on knees and other joints, and so I stopped for several years.
|
|
||||||
|
|
||||||
Coincidentally I had recently been doing research about the long-term effects of running, and the results are mixed; some studies indicate what I had heard from others (about joint issues), but many talked about the benefits of building leg muscles and how this might even protect the joints. It also turns out that running properly and with good equipment (i.e. trainers) also makes a big positive difference.
|
|
||||||
|
|
||||||
I thought that running could be a good replacement for my walk and some of my workout time - it burns the calories, helps maintain fitness, and has many positive psychological effects too. Especially if I could do it a few times a week.
|
|
||||||
|
|
||||||
The dog-walking friends mentioned a shop nearby that could run some gait analysis with me and suggest running trainers most appropriate for me. I booked an appointment, ran the analysis, ordered the trainers, and within a week had them collected and at home.
|
|
||||||
|
|
||||||
## The first few weeks
|
|
||||||
|
|
||||||
I'm now about three weeks back into running, so I thought I'd report on how it's going.
|
|
||||||
|
|
||||||
I thought I'd be much more of a mess than I actually am. I'm by no means quick (I do about 5:30 minutes per km on a good day), but I'm getting faster and definitely feel more fit. There's certainly some muscle memory there still after all of these years.
|
|
||||||
|
|
||||||
I run an average of three times per week, and go about 6km each time. I run first thing in the mornings before doing my workout, and then work. This then gives me the time I need to give the dog a chance to run around after work.
|
|
||||||
|
|
||||||
On the days I don't run in the morning, I instead go for a 30 minute walk with the dog.
|
|
||||||
|
|
||||||
The routine is good (I ❤️ routine), I get the same (if not more) exercise than before, and my dog gets more running time too. It means I need to get up earlier in the morning (more about that in a future post), but I actually quite enjoy that.
|
|
||||||
|
|
||||||
The main thing is that I no longer feel the _laziness_ I felt before. I start work with a good hour's worth of solid exercise done every day, a nice cup of coffee, and much more focus.
|
|
@ -1,91 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-18T22:13:00Z"
|
|
||||||
title: "How I back-up my personal server"
|
|
||||||
description: "How I back-up the cloud server I use for self-hosting all of the things."
|
|
||||||
tags: [100daystooffload, technology, selfhost]
|
|
||||||
slug: b2-backups
|
|
||||||
---
|
|
||||||
|
|
||||||
For a couple of years now I have been using a self-hosted [Nextcloud](https://nextcloud.com) as a replacement for iCloud and Google Drive. I won't go into the details as to why (especially given the additional upkeep and other overheads required), as this has been covered before - but mainly it's about maintaining control over my data.
|
|
||||||
|
|
||||||
I use a cloud VPS to host my Nextcloud instance - rented from [Linode](https://www.linode.com), whom I can certainly recommend if you're looking for a good VPS provider - and since starting my Nextcloud journey I have begun hosting a number of additional services on the same server. For example, [FreshRSS](https://www.freshrss.org) (which I consume using [Reeder](https://www.reederapp.com)), [Monica](https://www.monicahq.com), [Gitea](https://gitea.com), a [Matrix server](https://matrix.org), and more.
|
|
||||||
|
|
||||||
Considering the pervasiveness this one machine has with respect to my data and day-to-day life, and the impact it would have if I were to lose access to it, having backups for it is crucial.
|
|
||||||
|
|
||||||
## 3, 2, 1, Backup!
|
|
||||||
|
|
||||||
Linode offers a backup service for servers, which takes periodic snapshots in order to enable easy recovery of a service, or lost data. That's one layer, but what happens if Linode itself experiences problems or if I lose access to my account for any reason? Having all of the live data and backups tied to a single provider was definitely a worry for me.
|
|
||||||
|
|
||||||
Many people follow the "3-2-1" rule for backups. This strategy is concerned with - for any piece of data - having at least three copies of that data, two of which are stored locally but on different media, and another copy somewhere else (geographically separate).
|
|
||||||
|
|
||||||
Enabling Linode backups allows me to comply with the "3-2" bit of the rule. However, by stopping at this point there is no additional off-site backup in case of catastrophic failure.
|
|
||||||
|
|
||||||
## Finding my "1"
|
|
||||||
|
|
||||||
In order to fully meet the needs of the 3-2-1 strategy, I needed to find a solution for maintaining off-site backups. Additionally, this wouldn't be a one-time backup; ideally I'd need something that could at least back things up on a daily basis (if not more frequently).
|
|
||||||
|
|
||||||
I began researching solutions, but it wasn't long until I settled on [Backblaze B2](https://www.backblaze.com/cloud-storage) - an S3-compatible object storage solution that has great GB/$ pricing. Side note: Linode also offers S3-compatible [object storage](https://www.linode.com/products/object-storage), but that wouldn't help me in this scenario as it'd still be managed by the same provider.
|
|
||||||
|
|
||||||
B2 is cheaper than S3 itself, and also has the benefit of not having to maintain a complex AWS account for simple personal projects.
|
|
||||||
|
|
||||||
## Setting up backups to B2
|
|
||||||
|
|
||||||
Setting up the backups involved a few simple steps:
|
|
||||||
|
|
||||||
1. Creating a new Backblaze B2 account
|
|
||||||
1. Setting-up a bucket on B2
|
|
||||||
1. Writing a script to automate the backup to the B2 bucket
|
|
||||||
|
|
||||||
### 1. Create a Backblaze account
|
|
||||||
|
|
||||||
You get 10GB free on Backblaze. Head over to [the sign-up page](https://www.backblaze.com/b2/sign-up.html) in order to create your account.
|
|
||||||
|
|
||||||
### 2. Set-up your bucket
|
|
||||||
|
|
||||||
Once you've got your account and have verified everything, go to the "Buckets" tab of your Backblaze account's UI, and click "Create a bucket". This will open up a dialog.
|
|
||||||
|
|
||||||
Enter a unique name for your bucket and ensure you mark files as "private". I also turned on default encryption for an extra level of security. When ready, click "Create bucket".
|
|
||||||
|
|
||||||
![The "create bucket" interface](/media/blog/b2-backups-1.png)
|
|
||||||
|
|
||||||
Since we will be periodically backing-up data to this bucket, the bucket will quickly take up more and more space (and cost you more too). As such, I recommend adding a lifecycle rule to tell B2 to automatically delete "old" backup files. To do so, click the "Lifecycle Settings" option on your new bucket, and configure how long you want to keep old file versions around for (I used 10 days):
|
|
||||||
|
|
||||||
![The "lifecycle settings" interface](/media/blog/b2-backups-2.png)
|
|
||||||
|
|
||||||
Finally, we need to create some credentials that will enable the backup system to write files to the bucket. Go to the "App Keys" tab of your B2 dashboard, and click "Add a New Application Key". On this dialog, name your key and ensure this key can only write files to the bucket. You may also want to restrict this key to only work with your specified bucket.
|
|
||||||
|
|
||||||
![The "new application key" interface](/media/blog/b2-backups-3.png)
|
|
||||||
|
|
||||||
Make a note of the `keyID` and `applicationKey` that will be displayed (as well as your bucket's name and the "endpoint" shown on your bucket), as you'll need these later.
|
|
||||||
|
|
||||||
### 3. Write a backup script
|
|
||||||
|
|
||||||
Backblaze does have an API for managing buckets and files, but using this (especially for larger files) felt overly complex. Since B2 is S3-compatible, we can just make use of standard S3 tools, such as `awscli`.
|
|
||||||
|
|
||||||
As such, my script for backups simply creates a tarball containing all of the directories and files I want to backup, and then sends it to B2. This can be as simple as the following:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
tar --warning=no-file-changed -czf /tmp/backup.tar.gz /first/path /second/path
|
|
||||||
aws s3 cp /tmp/backup.tar.gz "s3://$BUCKET/backup.tar.gz" --endpoint-url "$ENDPOINT"
|
|
||||||
```
|
|
||||||
|
|
||||||
Before running the script, ensure that the following environment variables are set:
|
|
||||||
|
|
||||||
- `AWS_ACCESS_KEY_ID` (set to the `keyID` of the B2 key you created)
|
|
||||||
- `AWS_SECRET_ACCESS_KEY` (set to the `applicationKey` of the B2 key)
|
|
||||||
- `BUCKET` (the unique name of the bucket you created on B2)
|
|
||||||
- `ENDPOINT` (the endpoint shown on your bucket on the B2 UI: similar to `https://s3.eu-central-003.backblazeb2.com`)
|
|
||||||
|
|
||||||
If these are correctly set (and dependencies like `awscli` are installed), you should be able to mark the script as executable and then run it to backup the directories `/first/path` and `/second/path` (obviously change these to real paths on your server, and you can always add more).
|
|
||||||
|
|
||||||
You can verify the upload was successful by browsing the bucket on the B2 interface. Please note it can sometimes take a few minutes for files to show up!
|
|
||||||
|
|
||||||
_Note: I use the `--warning=no-file-changed` flag to prevent tar from warning about files that change during the tarball creation process (this happens to me because I backup my Matrix server files too, which change quite frequently as new messages arrive)._
|
|
||||||
|
|
||||||
## Automatic backups
|
|
||||||
|
|
||||||
The above setup is useful for one-off backups, but I wanted to automate the process. This could be as simple as a cron job, but I like Dockerizing things (this makes the environment variables easier to manage too).
|
|
||||||
|
|
||||||
To see my approach to automating the backup, feel free to clone and use the Docker image at [this repository](https://git.wilw.dev/wilw/server-backup).
|
|
@ -1,50 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-23T15:04:00Z"
|
|
||||||
title: "The networking mall"
|
|
||||||
description: "Describing the concept of network ports using an example of a shopping mall."
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
slug: port-mall
|
|
||||||
---
|
|
||||||
|
|
||||||
Someone non-technical recently asked me the question, "what actually _is_ a server?". They knew it was just a type of computer that runs _somewhere_ that can be accessible over the internet, but they were interested in how they differ from "normal" computers.
|
|
||||||
|
|
||||||
The conversation moved on to how these computers can make several different functions available at the same time over the network, which brought us on to the topic of services and network ports.
|
|
||||||
|
|
||||||
I was considering a few analogies to best describe the concept of services and ports, and then began talking about shopping malls.
|
|
||||||
|
|
||||||
# Shopping malls and servers
|
|
||||||
|
|
||||||
A single shopping mall allows visitors to interact with a large range of different shops and services - such as stores, restaurants, post offices, vending machines, car parks, and more. A single shopping mall is a bit like a computer (or server).
|
|
||||||
|
|
||||||
Each unit (that hosts a service) within the mall is usually numbered, like houses on a street. For example, a specific restaurant might be given the number `2500` within the mall. This allows for each service to be addressed uniquely for easier discovery (e.g. for delivering mail or packages). Although each service can be complex and provide a range of functionality, there can only be one service available at each service number.
|
|
||||||
|
|
||||||
If, for example, I wanted to visit the post office in the mall I might visit unit number `110`. Here I can prove my identity in order to receive mail that they may be holding for me. Bringing this back to servers, this concept is similar to that of using [POP (Post Office Protocol) for retrieving email](https://en.wikipedia.org/wiki/Post_Office_Protocol) from a mail server; I connect to (typically) port `110` on the mail server, authenticate, and then I can download the messages.
|
|
||||||
|
|
||||||
If I wanted to know the time, I might choose to visit an exhibition of old fashioned watches that happens to be on display in unit `37`. Here, I can't interact with the service in a way other than viewing the time (and appreciating the watches), and each person can only stay for a short while. Similarly, in computing, if I connected to port `37` via TCP of a server running the appropriate [Time Protocol service](https://en.wikipedia.org/wiki/Time_Protocol) I should simply receive back the current time.
|
|
||||||
|
|
||||||
If I happened to work in managing the mall, I might visit unit `22` - the manager's office (equivalent to connecting to a server via [SSH](https://en.wikipedia.org/wiki/Secure_Shell_Protocol) on port `22`) and remain there all day until I finish work.
|
|
||||||
|
|
||||||
The analogies can go much further. The essential thing is that - in computing - I can send traffic over a supported protocol to a specific port in order to interact with the type of service at that port. Some services (like the time protocol one above) might just send a response and then close the connection, whereas others (such as SSH) allow for an ongoing connection to be maintained in order to support a rich and feature-ful experience.
|
|
||||||
|
|
||||||
Although many malls have their manager's office at unit `22`, this is just convention and is not a requirement. The SSH daemon (the service that handles the SSH connection) can run on a different port if so desired. Similarly, libraries are often available at unit `80` in many malls - however in some malls there may be multiple libraries available at a range of different unit numbers (and maybe an extra secure library in unit `443`).
|
|
||||||
|
|
||||||
Some malls may have a watch exhibition, but it has been closed by the managers (still sitting in unit `22`). Since I can't get in, I am unable to view the current time even if the exhibition itself still exists (I may not even know there _is_ a watch exhibition).
|
|
||||||
|
|
||||||
Other malls may not have a watch exhibition on at all. If I visited unit `37` of these types of malls it would probably be closed. If the unit happens to be open for some reason, the unit would just be empty and I would not be able to receive the service or interact with it in any way.
|
|
||||||
|
|
||||||
Lots of malls recruit security guards to protect the entrance (and exit) of each unit. These guards (the "firewall") ensure that visitors are allowed into the unit in order to receive the service (even if the unit is open) - perhaps by verifying their proof of address (source IP) - and turn people away if they don't fulfil the requirements. The firewall guards may also prevent people from leaving the unit.
|
|
||||||
|
|
||||||
If someone keeps trying to repeatedly enter a guarded unit without the appropriate information, they might get banned (either temporarily or permanently).
|
|
||||||
|
|
||||||
Additionally, some units may only admit staff that work in other units of the same mall - this could be done by issuing new rules for the firewall guards or perhaps there is a non-public back corridor connecting the units that only mall staff can use (the loopback interface).
|
|
||||||
|
|
||||||
If the mall is closed completely, then I can't reach any of the ports or receive any service. For example, if the server is currently turned off or disconnected from the network.
|
|
||||||
|
|
||||||
## Some differences
|
|
||||||
|
|
||||||
Of course, the mall vs. server analogy isn't perfect. Most servers only have a small handful of ports open at a given time, and these would be heavily restricted with firewalls and other network protections.
|
|
||||||
|
|
||||||
Equally, when someone does visit a server, they usually do so with one goal in mind (e.g. to download mail OR retrieve web content). In reality, visitors may spend a few hours in a mall and visit a large number of different shops and services.
|
|
||||||
|
|
||||||
However, I find this analogy an interesting and useful way to describe some of the basic networking principles.
|
|
||||||
|
|
@ -1,27 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-05-26T21:50:00Z"
|
|
||||||
title: "The H.G. Wells Classic Collection"
|
|
||||||
description: "Some thoughts on a collection of sci-fi books by H.G. Wells."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: h-g-wells
|
|
||||||
---
|
|
||||||
|
|
||||||
The [Classic Collection](https://www.goodreads.com/book/show/7069333-h-g-wells-classic-collection-i) of [H.G. Wells](https://www.goodreads.com/author/show/880695.H_G_Wells) novels contains five well-known stories: _The War of the Worlds_, _The First Men in the Moon_, _The Time Machine_, _The Invisible Man_, and _The Island of Doctor Moreau_.
|
|
||||||
|
|
||||||
![The cover of the H.G. Wells Classic Collection](/media/blog/h_g_wells.jpg)
|
|
||||||
|
|
||||||
Despite the fame of these novels, I had never read any of them until I recently listened to them via the [audiobook version](https://www.audible.co.uk/pd/HG-Wells-The-Science-Fiction-Collection-Audiobook/B07PP8N213), which was excellently narrated by the likes of David Tennant, Hugh Bonneville, and others.
|
|
||||||
|
|
||||||
Wells is famous for being an early sci-fi writer (indeed, he is known as the 'Father of Science Fiction'), with his first such book - _The Time Machine_ - being published in 1895.
|
|
||||||
|
|
||||||
Books in this collection have formed the basis of both classic and modern films - perhaps most notably _The War of the Worlds_ (published as a novel in 1898) and _The Invisible Man_ (1897).
|
|
||||||
|
|
||||||
I'm a big lover of science fiction and it seemed sensible to go back to what can be seen as its birthplace. I really enjoyed the collection - each novel is thought provoking and engaging. The novels aren't long and the stories progress quickly, which helps to add to the excitement.
|
|
||||||
|
|
||||||
Although quite light-hearted seeming (in a classic Wellsian and British fashion), and often humorously written, the stories do go into interesting detail around the science and methods behind the phenomena that form the plots. From discoveries of new materials in _The First Men in the Moon_, descriptions of interesting physics in _The War of the Worlds_, and the effects of chemicals on human physiology in _The Invisible Man_, Wells certainly explores the scientific depths whilst also leaving some to the readers' imaginations.
|
|
||||||
|
|
||||||
In many of the books there is a significant level of human self-reflection as part of the story, which I think definitely helps place the stories ahead of their time. Whether talking about alien life, or life at a different point in time, there is definitely a sense of Wells projecting the position of humans in their new contexts, and the characters spend time either thinking or talking about themselves, their achievements (or failures), which gives the notion of humility with regard to human ability.
|
|
||||||
|
|
||||||
To me, _The Island of Doctor Moreau_ stands out as being a little different from the others. Although it is certainly science fiction and is a well-known and acclaimed novel, I found the story's concepts a little strange and did not enjoy it as much as the rest of the collection.
|
|
||||||
|
|
||||||
Either way, if you are interested in science fiction I would strongly recommend this collection of novels and enjoy the basis of - at the time - what would become countless further books and films often based on the same ideas and similar concepts.
|
|
@ -1,53 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-02T21:00:00Z"
|
|
||||||
title: "I can't play games anymore"
|
|
||||||
description: "Why I no longer really play video games."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
slug: gaming
|
|
||||||
---
|
|
||||||
|
|
||||||
## Growing up and the "Warcraft years"
|
|
||||||
|
|
||||||
In my earlier years I was fairly into gaming. I was definitely only ever a "casual gamer" in the scheme of things today, but I would play at least a small amount of _something_ most days.
|
|
||||||
|
|
||||||
When I was young it was mainly those games based on Nintendo platforms - Super Mario, Mariokart, Super Smash Bros, etc. These were great with friends and were the kind of games (along with their various sequels) that we could play over again and for many years to come. Pokemon was also a big hit for me, which would continue on through the consoles.
|
|
||||||
|
|
||||||
As I moved into my early teens, strategy games became more my thing. My elder brother introduced me to [Warcraft](https://en.wikipedia.org/wiki/Warcraft) and I would play and re-play the various campaigns of Warcraft I and Warcraft II.
|
|
||||||
|
|
||||||
When Warcraft III became more relevant to me I would sink hours into playing LAN games with friends and siblings. This was also my first proper foray into online gaming (through [Battle.net](https://en.wikipedia.org/wiki/Battle.net)).
|
|
||||||
|
|
||||||
I didn't really touch the other Blizzard games (Starcraft and Diablo) too much, but did get involved with a few other RPG- and simulation-type ones (The Sims, Rollercoaster Tycoon, and others).
|
|
||||||
|
|
||||||
When [World of Warcraft](https://en.wikipedia.org/wiki/World_of_Warcraft) was released in my early-mid teens, this was a bit of a game-changer. To me, it was the perfect combination of success/reward, social factors, depth of story, and (at the time) a huge world to explore.
|
|
||||||
|
|
||||||
It quickly became my only game and a sort of addiction. My friends and siblings all played it, and it would end up replacing other online hang-out spaces of the era (MSN Messenger). As such, it was much more than just a game to me - and I think many would feel the same. The nostalgia for those early WoW years has always been on my mind - though unfortunately was not really rekindled even when WoW Classic was much later released.
|
|
||||||
|
|
||||||
## Moving to university
|
|
||||||
|
|
||||||
I was still playing WoW for several hours each day, even when I hit my late teens and my A-Level exams. I was lucky enough to scrape the grades needed (despite a distinct lack of study and revision!) and headed to university.
|
|
||||||
|
|
||||||
This is when things began to change a little. I suddenly had new responsibilities, new friends, and new experiences. Gaming took a rather sudden back-seat to everything else that was going on in my life - new people, exploring, partying (and learning, I guess).
|
|
||||||
|
|
||||||
As I progressed through university, I would still play sporadically. But this would only be in my "home" life - the life I had when I visited my family and younger siblings. They were still at an age of no responsibility, and so gaming would be a natural pastime. We would play [Minecraft](https://en.wikipedia.org/wiki/Minecraft), [Garry's Mod](https://en.wikipedia.org/wiki/Garry%27s_Mod), and other similar games.
|
|
||||||
|
|
||||||
If we went on family holidays together I might buy the latest Pokemon RPG to play together, but this would quickly be forgotten afterwards as I got back to my main life.
|
|
||||||
|
|
||||||
When I play games with family I really enjoy it, and since in these cases I am on holiday anyway it feels like real leisure time.
|
|
||||||
|
|
||||||
## Now
|
|
||||||
|
|
||||||
When I left university to begin full-time work my life changed again. I now had even less time, and I felt no real motivation to game even in my spare time. The last real game I played was Animal Crossing: New Horizons when that hit the Switch during last year's lockdown, but that's it really aside from occasional bouts with my siblings.
|
|
||||||
|
|
||||||
This post sounds pretty downbeat, but I don't mean for it to at all. I actually love games as a concept - they are a work of art and often reflect years of creative input across gameplay, story, graphics, music, and more.
|
|
||||||
|
|
||||||
I love that small indie (and solo) developers can be given a platform to sell from that enables them to compete with the larger studios. I am fascinated by all the different genres, and the new twists we see on these year on year.
|
|
||||||
|
|
||||||
I still follow lots of gaming news and am interested in keeping up to date with developments.
|
|
||||||
|
|
||||||
It's weird - I just don't have the motivation to play games myself anymore.
|
|
||||||
|
|
||||||
I know games should be used as an opportunity to wind down and relax, but every time I do my mind protests, "you should be doing something more productive". I don't know why this is, and why I can't seem to shut down any more, but there it is. I can't really sit and watch TV either.
|
|
||||||
|
|
||||||
I would be more comfortable spending my "downtime" continuing working, learning, and improving myself. This all sounds very noble, but it's actually pretty frustrating. I enjoy learning and working, but understand the importance of being able to shut down and relax once in a while.
|
|
||||||
|
|
||||||
Does anyone have any experience with this or have any tips?
|
|
@ -1,10 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-10T08:04:00Z"
|
|
||||||
title: "Married"
|
|
||||||
description: "I got married."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
---
|
|
||||||
|
|
||||||
Just a quick post to say that I recently got married! By coincidence the event was three years to the day after our engagement.
|
|
||||||
|
|
||||||
It was a lovely day - great weather and really nice to see those that could attend. Hopefully we'll get a chance to go away later in the year if/when things start opening up again 😁
|
|
@ -1,189 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-11T19:00:00Z"
|
|
||||||
title: "City-centre beekeeping"
|
|
||||||
description: "Our experience in keeping bees in a city."
|
|
||||||
tags: [100daystooffload, life]
|
|
||||||
slug: beekeeping
|
|
||||||
---
|
|
||||||
|
|
||||||
## Getting and "installing" the nuc
|
|
||||||
|
|
||||||
For his birthday a few years back, I bought my ([now-](/blog/2021/06/10/married))husband a beehive and a [honeybee nucleus](https://en.wikipedia.org/wiki/Nuc).
|
|
||||||
|
|
||||||
Some might see this as a strange gift, especially given that we live close to the city centre. It was certainly a surprise for him, but given his love for animals and science I knew he would like it.
|
|
||||||
|
|
||||||
We were lucky enough to have a relatively large garden, given our location, of around 20 metres in length. Since we didn't really use the end of the garden much, it was a good location for a hive - though other people have successfully kept bees in much smaller areas and on rooftops.
|
|
||||||
|
|
||||||
The hive is an [Omlet Beehaus](https://www.omlet.co.uk/shop/beekeeping/beehaus), which features two entrances (one on either end). This means that - using the included divider board - one can keep two separate colonies, and the distance between the entrances is sufficient to ensure the bees know which one is theirs.
|
|
||||||
|
|
||||||
![The beehive and nucleus](/media/blog/bees1.jpg)
|
|
||||||
|
|
||||||
_The image above shows the Beehaus hive and the nuc box on the floor. The bees are being added to the right-hand side colony and there is a divider board in place._
|
|
||||||
|
|
||||||
I collected the nuc from a farm in Oxfordshire, England - a few hours' drive from our home. The nuc box had to be kept cool on the way home and the bees also needed water. It was a little disconcerting travelling with a box full of bees!
|
|
||||||
|
|
||||||
After we got them home, we moved the bees to the hive by transferring the nuc frames (which are shorter than normal brood frames) from the nuc box to the brood box of the Beehaus.
|
|
||||||
|
|
||||||
## The queen
|
|
||||||
|
|
||||||
Luckily, the queen had already been marked and so she was relatively easy to identify. Each colony has one queen, which is marked by some harmless ink on the back of her body to make her more visible. There is a [recognised standard](http://beespoke.info/2014/04/01/queen-marking-colours) for marking queens based on the year in which they are born - ours was white for "2016".
|
|
||||||
|
|
||||||
![Frame of bees and brood](/media/blog/bees2.jpg)
|
|
||||||
|
|
||||||
_The image above shows a frame of capped brood in our nuc. The white marked queen is visible near the centre bottom of the frame._
|
|
||||||
|
|
||||||
Queen bees are extremely important for the correct function of a honeybee colony. They lay the eggs that become new bees, they secrete hormones and other scents that control the colony in various ways, they dictate the _mood_ of the colony, they choose when it's time to swarm, and much more.
|
|
||||||
|
|
||||||
Without a queen present, the bees will try and create a new queen out of an existing larva at the right stage of development. Without an egg-laying queen, a colony would eventually die out.
|
|
||||||
|
|
||||||
Our first queen was lovely. The colony was super-calm (we often wouldn't need a suit for inspections), there was always a healthy brood, and the honey collection was strong even from the start.
|
|
||||||
|
|
||||||
## The first few weeks
|
|
||||||
|
|
||||||
We got the bees at the start of summer. Since a new colony from a nuc takes some time to get up to full-size, it was unlikely that they'd swarm the first year. Swarms take place in the summer months, but only if certain conditions are met.
|
|
||||||
|
|
||||||
This was good, as it gave us a year to get the hang of things before having to deal with a swarm!
|
|
||||||
|
|
||||||
Over the first couple of months, the colony grew in size and we gradually swapped out the nuc frames for full-size brood frames in order to give the bees space to grow (i.e. create the cells for storing eggs, larvae, pollen, and honey).
|
|
||||||
|
|
||||||
The workers (female bees) were out in full-force collecting pollen, and the drones would do their thing (usually nothing) back at the hive.
|
|
||||||
|
|
||||||
Workers can travel a good few miles on a single trip, and gradually travel further out as they learn the local area. Due to this range, it isn't recommended to move bees within a few miles of their established location, since when they leave the hive there is a good chance they will recognise the area and travel back to where the hive used to be.
|
|
||||||
|
|
||||||
Bees can be quite sensitive to changes in the hive location, with the general rule being to move the hive only less than one metre OR more than six miles.
|
|
||||||
|
|
||||||
## Social factors
|
|
||||||
|
|
||||||
We worried that the neighbours might be a little nervous about a colony of bees so nearby, but actually they seemed to really enjoy it. They often asked us about it and planted flowers especially!
|
|
||||||
|
|
||||||
This felt really positive given current worldwide efforts in conserving bee populations.
|
|
||||||
|
|
||||||
One of our neighbours also had a pond, which was great as it allowed the bees to drink (they don't like to drink water from sources too close to their hive).
|
|
||||||
|
|
||||||
Generally, bees aren't interested in people at all. They would only become aggressive if they feel their colony is under threat, and so finding a bee out in the wild or a few metres away from the hive is pretty safe. Bees usually travel quite quickly about 10 metres above the ground, and would usually only descend when collecting pollen or when returning to the hive.
|
|
||||||
|
|
||||||
## Adding the "supers"
|
|
||||||
|
|
||||||
We were able to add the shorter "super" frames after a couple of months. It was a long summer, and the bees continued being quite active long into September and beyond.
|
|
||||||
|
|
||||||
Super boxes and frames sit on top of the brood box (you can see two in the photo above), and there is a "queen excluder" board that sits between the supers and the rest of the hive. The excluder features holes that allow workers through to deposit honey into cells in the super frames, but are too small for the queen to get through.
|
|
||||||
|
|
||||||
This is useful for harvesting honey, as the eggs and larvae and pollen stay below in the main brood box. The workers take the excess honey into the super frames, which can be easily withdrawn without disturbing the main hive.
|
|
||||||
|
|
||||||
Bees collect more than enough honey and place most of it in their main brood box, which we do not remove. It is only the excess honey which is taken.
|
|
||||||
|
|
||||||
## First harvest
|
|
||||||
|
|
||||||
We got our first harvest late in the summer. Cities are full of lots of different types of flowers and plants, since every garden is different, and the vibrant gardens were clearly very attractive to the bees!
|
|
||||||
|
|
||||||
![A super frame full of capped honey](/media/blog/bees3.jpg)
|
|
||||||
|
|
||||||
_The image above shows a super frame full of capped honey ready for harvest._
|
|
||||||
|
|
||||||
We checked the supers every week as part of our normal inspections, and eventually decided that the supers were full enough to harvest. The supers, when full of honey, are heavy, which is very satisfying! The honeycomb structure is effective: a single frame could easily hold several jars' worth of honey.
|
|
||||||
|
|
||||||
We extracted the honey by carefully scraping the comb away from the foundation sheet. This gave us a bowl full of honey and wax from the comb, which we strained through cloth into jars. We kept the wax and later purified it - we still haven't done anything with this growing collection of wax!
|
|
||||||
|
|
||||||
Once harvested, the empty frames were re-added to the supers to allow the workers to rebuild the comb and continue honey production.
|
|
||||||
|
|
||||||
## Winter
|
|
||||||
|
|
||||||
As the days got colder, the colony began to naturally shrink in size. We removed the supers and gradually reduced the number of frames within the brood box. This helps the bees maintain their own internal temperature during the winter.
|
|
||||||
|
|
||||||
During the winter there is no way for the bees to collect additional food. They can feed off the honey they produced during the warmer months, but we helped them along by feeding them. This can be done by making a sugar solution and placing this in a "contact feeder" upside-down in the hive.
|
|
||||||
|
|
||||||
We checked the bees less during the winter. This helps prevent the cold air getting in when opening the lid.
|
|
||||||
|
|
||||||
## Spring and swarm watch
|
|
||||||
|
|
||||||
In spring, the days began to get warmer again, which was marked by more bee activity around the hive entrance. We gradually re-added frames as the colony began to increase in size again.
|
|
||||||
|
|
||||||
As the months progressed we kept an eye out for signs of bee swarming. This can be fiddly business, but there are a number of indicators you can use. The most obvious indication of a colony preparing to swarm is the presence of queen cells in the brood frames.
|
|
||||||
|
|
||||||
Queen cells are created by workers when it is time to create a new queen. It is done by elongating a normal worker (female) brood cell outward and feeding the larva royal jelly (a special kind of honey) as it develops. This jelly makes the bee larva grow bigger than a normal bee and with the characteristics of a queen.
|
|
||||||
|
|
||||||
Queen cells are obvious to see, and usually show near the bottom of the frame. I don't have any photos myself but there are lots if you search the web.
|
|
||||||
|
|
||||||
Once a queen cell has been capped (i.e. sealed off for its final development), it is likely that the colony will swarm within the next couple of days.
|
|
||||||
|
|
||||||
## Swarm control
|
|
||||||
|
|
||||||
Swarms are part of the natural honeybee colony lifecycle, and usually occur every year in healthy colonies. A swarm signifies the division of a colony: the old queen flies away with most of the colony, leaving some bees behind (workers and nurse bees, as well as drones) along with the soon-to-hatch queen(s).
|
|
||||||
|
|
||||||
A swarm is quite an intense process, and involves lots of loud flying bees as the hive gradually empties. However, a bee swarm is actually pretty safe: the bees are often at their most docile when in a swarm.
|
|
||||||
|
|
||||||
There are many ways one can prevent an all-out swarm. Swarm control is definitely recommended in a city to avoid concerning neighbours.
|
|
||||||
|
|
||||||
Some people clip the wings of the queen to prevent her from being able to fly (the colony won't fly away without her). Others carry out an "artificial swarm" - this is what we opted for.
|
|
||||||
|
|
||||||
To artificially swarm the bees, we took some of the brood frames of honey and pollen from one half of the Beehaus and placed them in the other half - making sure the queen was also transferred.
|
|
||||||
|
|
||||||
![Swapping frames between the sides of the hive](/media/blog/bees4.jpg)
|
|
||||||
|
|
||||||
_The image above shows us selecting frames for transfer to the new side of the hive to artificially swarm the colony._
|
|
||||||
|
|
||||||
This left us with two separate colonies - the existing one containing the "new" bees with a few frames of brood, the queen cells, and some pollen and honey, along with some remaining workers, drones, and nurse bees. Importantly, the new colony does not have a current queen.
|
|
||||||
|
|
||||||
The "swarmed" colony on the other side of the Beehaus contained the existing queen, plus many of her workers, drones, and frames of pollen and honey and - importantly - no queen cells! This will now trick the queen into thinking she has already swarmed: it is a "new" hive, no queen cells around or brood. She can begin the process again of egg laying as before.
|
|
||||||
|
|
||||||
## The two colonies
|
|
||||||
|
|
||||||
The "swarmed" colony continued to grow as before. The existing queen gradually continued laying to get the colony back up to size. We were able to place supers back on that side not long after.
|
|
||||||
|
|
||||||
In the "new" queenless colony we removed all but two of the queen cells. After a few days she emerged and went on her mating flight. During this flight she mated with a drone and returned to the hive. Shortly after she began laying and started her other queenly duties.
|
|
||||||
|
|
||||||
We were lucky in that the process worked quite smoothly. Sometimes, the first queen to emerge will explore the hive and sting any still-to-emerge queens through their cells to ensure she is the only queen. If two queens hatch at similar times, they can fight. Either way, there can only be at most one queen. If the queening is unsuccessful for any reason (e.g. they die when fighting or if they fail to emerge from their cells) the colony can be requeened by purchasing a new queen from a farm.
|
|
||||||
|
|
||||||
## Uniting the colonies
|
|
||||||
|
|
||||||
Having two colonies is fine if you are a commercial beekeeper with lots of space. However, colony division would mean that we would eventually have four, eight, sixteen colonies, and so on as they continued to swarm and divide year on year.
|
|
||||||
|
|
||||||
We only really wanted one colony, and having the spare half of the Beehaus is useful for ongoing bee maintenance. As such, we opted to unite the colonies.
|
|
||||||
|
|
||||||
Uniting a colony involves getting rid of one queen and then gradually bringing the colonies together. Given the success of the first queen we had (both in terms of bee temperament and honey production), we chose to keep our old faithful.
|
|
||||||
|
|
||||||
To unite the colonies we first of all removed the new queen (I won't go into the details here). We then modified the divider board between the two halves of the hive to add a sheet of paper. The bees belonging to the new queen would recognise an entirely different set of scents and hormones, and require a slow introduction to our older queen.
|
|
||||||
|
|
||||||
Adding the paper allows the various chemicals to gradually filter through as the bees on either side eat through. This gradual process allows bees on both sides to become accustomed to each other and to allow the now-queenless bees to get used to their new queen.
|
|
||||||
|
|
||||||
Eventually the paper is gone, and we were able to move the frames all back to one side of the Beehaus as before.
|
|
||||||
|
|
||||||
## The second year
|
|
||||||
|
|
||||||
After the swarming and re-uniting, the bees carried on much the same as the first year. We had several great harvests from some very productive bees!
|
|
||||||
|
|
||||||
## Swarm un-control
|
|
||||||
|
|
||||||
In our third year, we again looked out for the signs of swarming but we must have missed a trick. One day I noticed a huge amount of activity around the hive and realised what must have happened. There could have been less obvious queen cells present, or perhaps the existing queen just decided to leave early.
|
|
||||||
|
|
||||||
Either way, the bees were definitely properly swarming. Luckily, it was a weekday and so nearly everyone around was at work or at school (we worked from home at the time). We could see the bees congregating around a tree in a nearby garden - this must have been where the queen was.
|
|
||||||
|
|
||||||
![The bees swarming and gathering around a tree trunk](/media/blog/bees5.jpg)
|
|
||||||
|
|
||||||
_The image above shows bees swarming and a mass of bees gathering around a small tree trunk._
|
|
||||||
|
|
||||||
The swarm was very noisy. We knew we didn't really have long to try and get the queen and the rest of the bees back (they would return home once they knew the queen was no longer there). We didn't want to just go into someone else's garden without permission, and so I went round to introduce myself. Luckily they were home, and were very understanding.
|
|
||||||
|
|
||||||
They let us into their house and garden and we were able to scoop the mass of bees into a shoebox. There was too much activity to identify the queen, but we just _hoped_ we had her.
|
|
||||||
|
|
||||||
![Recovering the swarm](/media/blog/bees6.jpg)
|
|
||||||
|
|
||||||
_We transported the bees over the wall to avoid carrying them through the house._
|
|
||||||
|
|
||||||
![Carrying the bees in a shoebox](/media/blog/bees7.jpg)
|
|
||||||
|
|
||||||
_Success!_
|
|
||||||
|
|
||||||
We were able to get the box back to the hive and emptied them back inside. We removed any of the queen cells we could find. We closed the lid and hoped the ordeal was over.
|
|
||||||
|
|
||||||
Luckily, the activity died down over the rest of the day, and the bees resumed normal activity over the next few days. All of the drama was clearly enough to convince the queen that she had already swarmed, and she was happy to resume business as usual.
|
|
||||||
|
|
||||||
## Continuing on
|
|
||||||
|
|
||||||
The rest of our beekeeping adventure was much less exciting. We got stung a few times (mainly through the gloves), but nothing serious or anything to write home about. We now have a new queen from a more recent swarm.
|
|
||||||
|
|
||||||
Other than the swarm mistake we made, the bees did not bother our neighbours. They enjoyed having the bees around and actively engaged with us and them.
|
|
||||||
|
|
||||||
We definitely learned a lot (and still continue to). It feels great to help support the bee preservation movement, and they are probably some of the most interesting animals on the planet.
|
|
||||||
|
|
||||||
If you are interested in bees, have some space and some easy-going neighbours, I can definitely recommend giving it a try. You can also join local beekeeping societies to join a wider community, and everyone is very supportive!
|
|
@ -1,63 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-12T11:05:00Z"
|
|
||||||
title: "RSS: include your entire posts in your feeds!"
|
|
||||||
description: "Why your feeds should contain the entire post content."
|
|
||||||
tags: [100daystooffload, technology, opinion]
|
|
||||||
slug: rss
|
|
||||||
---
|
|
||||||
|
|
||||||
Recently I've noticed that some of the RSS feeds I subscribe to have become more and more restrictive. A post might contain just a title, or perhaps a short snippet or introductory paragraph, with the expectation that I then proceed to follow the link to visit the website itself in order to view the post in full.
|
|
||||||
|
|
||||||
I suppose in many ways that this is similar to distributing podcasts via RSS: the feed contains the podcast title, description, and other metadata, and then a link to download the podcast episode file itself. But this is because podcasts are in audio or video format and cannot be reasonably embedded directly into an XML file.
|
|
||||||
|
|
||||||
But blog posts and articles are primarily _text_, for which XML is perfect for transferring.
|
|
||||||
|
|
||||||
Most of the examples I've seen are commercial news outlets that probably still make (at least some) income through selling adverts. Although I still disagree with this (we all use ad-blockers anyway), in these cases they have a business objective to get you to their website for their own analytics and to drive their revenues.
|
|
||||||
|
|
||||||
However I have seen some personal text blogs and non-commercial outlets doing the same thing, and if this is intentional I just wonder what the motivation is. Maybe it's for site analytics? Or maybe the author is worried about the size of the feed's XML file getting too large?
|
|
||||||
|
|
||||||
If you need analytics of subscribers the author can simply log basic request info to their feed's XML file download. To prevent the XML file from getting too big authors can simply limit the feed to the most recent _n_ posts.
|
|
||||||
|
|
||||||
Either way, there are a number of good reasons for allowing your subscribers to retrieve entire post contents in their feeds.
|
|
||||||
|
|
||||||
**Client familiarity**
|
|
||||||
|
|
||||||
Many people use a familiar client to consume blog posts and articles. For example, I use [Reeder](https://reeder.app), which has a fantastic interface that makes reading posts and content very enjoyable. It uses a great readable font, displays images beautifully, respects my system's dark/light theme, and much more.
|
|
||||||
|
|
||||||
Other people might enjoy different clients, but the point is that this makes the experience consistent across feeds. Therefore, consuming the content is much quicker and feels more natural. If I have to visit your website to view the article then I have to find where the content starts on your particular site, deal with whatever font and colours you choose and have inconsistent layouts across my different subscriptions.
|
|
||||||
|
|
||||||
I often read posts on my phone, and if your website is non-responsive to smaller screen sizes then this is a massive pain. In general, reading your posts via a client is much less invasive on my time and I can concentrate on actually enjoying the content.
|
|
||||||
|
|
||||||
Clients can also make use of accessibility features (like screen readers) in order to make your post available to a wider audience.
|
|
||||||
|
|
||||||
**Caching**
|
|
||||||
|
|
||||||
When my client refreshes the feed it downloads all the latest unread posts (as well as storing previously-read ones). This means that if I am about to take a flight or get on the tube I know I will have lots of interesting content to read whilst my phone is out of the network's reach.
|
|
||||||
|
|
||||||
However, if I have to visit your website to view the post then it simply can't be read. By the time I've landed the title has probably been forgotten and I won't remember to go back through and load it.
|
|
||||||
|
|
||||||
**Protocol agnosticism**
|
|
||||||
|
|
||||||
RSS is protocol-agnostic in the sense of accessing the content within. For podcasts this may be a link (usually using HTTP) to access the episode file.
|
|
||||||
|
|
||||||
For text feeds, it doesn't matter what the "source" is: it could be a website, an FTP server, a gemini capsule, or anything else. Maybe, in some cases, there _isn't_ an explicit source and RSS is the primary means of distribution?
|
|
||||||
|
|
||||||
Either way, one shouldn't assume that people always want to access via HTTP, and so including the text content directly in the feed helps to keep it pure and simple.
|
|
||||||
|
|
||||||
**Trust**
|
|
||||||
|
|
||||||
If I can read your content directly within my own client, it helps build trust. I know you aren't trying to track my every move and that you care about my ability to read the content.
|
|
||||||
|
|
||||||
I know that you are sharing your content for the sake of the writing or piece itself (perhaps because you enjoy writing or want to share your thoughts), and not in order to drive sales or use patterns to manipulate me into carrying out an action that you want. It also shows you respect user privacy.
|
|
||||||
|
|
||||||
**Lightweight**
|
|
||||||
|
|
||||||
Distributing text-only versions of your posts is much lighter than having to transfer entire webpage files, CSS, JavaScript, and much more. In fact, if you pay for server space with per-MB billed traffic egress then this could help save you money.
|
|
||||||
|
|
||||||
More and more people in the tech community browse the web without JavaScript enabled anyway, so if your site relies on JS to load then they won't be able to view your content. Think about the people in the intended audience of your post.
|
|
||||||
|
|
||||||
## Conclusion
|
|
||||||
|
|
||||||
One can easily argue that "this is my site, and if I write the content then I want people to view it _my_ way". This is perfectly fine, and is entirely up to you. This post is more about explaining why these practices might convey quality-of-life enhancements to your readers, and is just my opinion and not a set of rules.
|
|
||||||
|
|
||||||
The web (and internet in general) is great in that it gives everyone a platform to distribute their content in the way they choose. However, since it's up to others if they choose to read what you post, by making this easier and more accessible to them you can make sure you reach a wider audience.
|
|
@ -1,97 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-18T19:55:00Z"
|
|
||||||
title: "Tmuxinator: simple terminal workspaces for your projects"
|
|
||||||
description: "How to use tmuxinator to manage your tmux based project workspaces."
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
slug: tmuxinator
|
|
||||||
---
|
|
||||||
|
|
||||||
## Living without workspaces
|
|
||||||
|
|
||||||
IDEs and richly-featured text editors - such as [VS Code](https://code.visualstudio.com) and [Sublime Text](https://www.sublimetext.com) - support many great features. One of these is the notion of _projects_ or _workspaces_.
|
|
||||||
|
|
||||||
Such workspaces let you save your project's development configuration to disk - things like the project directory, open files, editor layout, integrated terminal commands, and more. Often, each project can have its own workspace, too.
|
|
||||||
|
|
||||||
If you use workspaces then you don't need to go through the tedious process of setting everything back up again each time you switch project, re-open your editor, or reboot your computer.
|
|
||||||
|
|
||||||
However, if, like me, you use the terminal as a primary development environment, things don't work quite so nicely out of the box. For example, I use [tmux](https://github.com/tmux/tmux/wiki) as my primary development environment, and make use of multiple windows and panes for things like Vim, source control, logs, and running commands.
|
|
||||||
|
|
||||||
At any given time, I might have a handful of tmux sessions running (one for each project). A single small session project might consist of a web API service and a separate front-end - each comprising Vim editor panes, and a mix of other things. Context switching is super easy, as I can just detach from a session, and then re-attach to another one that tmux has kept running for me in the background.
|
|
||||||
|
|
||||||
However, the pain point comes when rebooting. Once the tmux server process terminates, all of the running sessions are lost. This means setting each session up again individually each time you want to begin working on a different project after rebooting.
|
|
||||||
|
|
||||||
It certainly feels like a blocker to performing system upgrades that require reboots, and is also extra friction that may prevent one from working on specific projects if the set-up is too painstaking. Both of these are clearly not good.
|
|
||||||
|
|
||||||
However, there is a solution: [tmuxinator](https://github.com/tmuxinator/tmuxinator).
|
|
||||||
|
|
||||||
## Tmuxinator
|
|
||||||
|
|
||||||
Tmuxinator is a program that partly aims to try and fix the workspace problem for tmux-based workflows, and my life is so much easier because of it.
|
|
||||||
|
|
||||||
The program does not interfere with the tmux server directly, and neither does it maintain individual explicit tmux session data - tmux sessions are still lost after reboot.
|
|
||||||
|
|
||||||
However, what it _does_ do is make workspace session management so much easier by storing your project window and pane layout in a simple YAML file on disk.
|
|
||||||
|
|
||||||
For example, a simple API and separate web front-end project (as mentioned above) could be described as the following tmuxinator project:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: cool-project
|
|
||||||
root: ~/project/my-cool-web-project
|
|
||||||
|
|
||||||
windows:
|
|
||||||
- api:
|
|
||||||
root: ~/project/my-cool-web-project/api
|
|
||||||
layout: main-vertical
|
|
||||||
panes:
|
|
||||||
- vim
|
|
||||||
- app:
|
|
||||||
- source .venv/bin/activate
|
|
||||||
- source .envfile
|
|
||||||
- flask run
|
|
||||||
- zsh
|
|
||||||
- frontend:
|
|
||||||
root: ~/project/my-cool-web-project/web
|
|
||||||
layout: main-vertical
|
|
||||||
panes:
|
|
||||||
- vim
|
|
||||||
- yarn start
|
|
||||||
- zsh
|
|
||||||
```
|
|
||||||
|
|
||||||
This project represents two tmux windows, each with three panes: an editor, a server (or watcher), and an empty shell pane that can be used for issuing commands (like `git`). The inbuilt `main-vertical` layout automatically provides a nice, big Vim window (in these cases) for editing, and then a vertically-split extra pair of panes.
|
|
||||||
|
|
||||||
Each window has a separate root directory, and the project as a whole has its own root directory too, to provide better automatic working directories in case new windows are later created when inside the session. Each session and window also gets its own name (e.g. `api` and `frontend` above) to make identification easier later on.
|
|
||||||
|
|
||||||
If this file is stored in `~/.config/tmuxinator/cool-project.yml`, one can simply run `tmuxinator start cool-project` to get started. If the project is already running it will attach you to it as-is. If the project is not currently up and running, tmuxinator will go ahead and create all your windows and panes, run the commands you specify, and connect you to the new session.
|
|
||||||
|
|
||||||
Once inside the session, it's just controlled by plain old tmux. To detach from the session, just use the usual tmux sequence (by default `Ctrl-b` followed by `d`). You can then connect back to the same session or another one.
|
|
||||||
|
|
||||||
## Creating more project configurations
|
|
||||||
|
|
||||||
Tmuxinator comes with lots of other commands to make set-up easier. For example, running `tmuxinator new <project name>` will open up your editor on a template YAML file for you to edit and then save.
|
|
||||||
|
|
||||||
If you have lots of similar types of projects with the same layouts then the copy command is useful for duplicating projects as a convenient start-point: `tmuxinator cp <existing> <new>`. You can also list and delete projects in a similar way: `tmuxinator ls` and `tmuxinator rm <project name>`.
|
|
||||||
|
|
||||||
Definitely take a look through the [documentation](https://github.com/tmuxinator/tmuxinator) to learn more about these and other commands.
|
|
||||||
|
|
||||||
## Installing tmuxinator
|
|
||||||
|
|
||||||
Many distributions include tmuxinator in their repos. On macOS it's a simple `brew install tmuxinator`.
|
|
||||||
|
|
||||||
Take a look at the [installation instructions](https://github.com/tmuxinator/tmuxinator#installation) for more information.
|
|
||||||
|
|
||||||
## Backing-up and syncing projects
|
|
||||||
|
|
||||||
Being able to recover tmux sessions is great, but what if you want to sync projects between devices, or back them up?
|
|
||||||
|
|
||||||
There are various approaches for this. I store my configuration files in my personal [Nextcloud](https://nextcloud.com), which means I can hydrate any new devices with a simple link:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
ln -s ~/Nextcloud/dotfiles/.config/tmuxinator ~/.config/tmuxinator
|
|
||||||
```
|
|
||||||
|
|
||||||
That way, if I use `tmuxinator new` to create a new project configuration it will automatically get synced to my Nextcloud. This approach also works if you use software like [Syncthing](https://syncthing.net).
|
|
||||||
|
|
||||||
If you work in a team and want to share your setup through version control, you could also commit the project-specific YAML file to your repo. The `tmuxinator start` command will look in `./.tmuxinator.yml` before anywhere else, and so this offers a nice way to get your whole team using a consistent setup. However, in my experience the workspace setup can be quite a personal thing!
|
|
||||||
|
|
||||||
If you have any other thoughts for maintaining terminal-based workspace sessions, then I'd love to hear them. Please let me know.
|
|
@ -1,22 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-19T18:13:00Z"
|
|
||||||
title: "Anxious People by Fredrik Backman"
|
|
||||||
description: "My thoughts on the book Anxious People by Fredrik Backman."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: anxious-people
|
|
||||||
---
|
|
||||||
|
|
||||||
[_Anxious People_](https://www.goodreads.com/book/show/49127718-anxious-people) is a book about an attempted bank robbery in a Swedish town (not Stockholm!). It is written by [Fredrik Backman](https://www.goodreads.com/author/show/6485178.Fredrik_Backman).
|
|
||||||
|
|
||||||
![Anxious People book cover](/media/blog/anxious_people.jpg)
|
|
||||||
|
|
||||||
The story involves a would-be bank robber arriving unexpectedly at an open apartment viewing whilst trying to run away, and taking the prospective buyers hostage in the process. It is mostly split between being set at the apartment itself and the police station in which the hostages are separately interviewed after the event. It is told primarily from the perspectives of the bank robber, the hostages, and the police officers.
|
|
||||||
|
|
||||||
I think that the first few chapters set this book off in the wrong light - they seem a little childish and appear to be filled with annoying and unconvincing characters. However, once past the first few scene-setting parts the story comes into its own.
|
|
||||||
|
|
||||||
Very quickly I got the impression of a deeply intertwined collection of lives that span across all of the characters, from the hostages to the bank robber and the police officers. The interconnection also spans across _time_, with past events that affected one or more of the characters having impact on their present lives, and the particular situation at hand.
|
|
||||||
|
|
||||||
The author cleverly introduces concepts and events earlier on in the novel, which then solidify and take on further meaning and importance as each character's story progresses further.
|
|
||||||
|
|
||||||
Characters I didn't really like at the start of the novel soon become more relatable as I understood them more clearly towards the end. It's definitely an interesting book and one I can recommend as a light, but thought-provoking, read.
|
|
||||||
|
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-23T22:21:00Z"
|
|
||||||
title: "Joining a panel at Wales Tech Week"
|
|
||||||
description: "I was a panellist on a blockchain event at Wales Tech Week."
|
|
||||||
tags: [100daystooffload, technology, life]
|
|
||||||
slug: wales-tech-week
|
|
||||||
---
|
|
||||||
|
|
||||||
[Wales Tech Week](https://www.canva.com/design/DAEe1QqwIPw/SJx9TDgReq4B_V5Vc2w9sA/view?website) is an annual event held by [Technology Connected](https://technologyconnected.net). The 2021 event is running this week, aiming to bring technologists together from a wide range of businesses and organisations across Wales.
|
|
||||||
|
|
||||||
Today, I was a member of a panel discussing blockchain - _"Welsh Businesses Bringing Blockchain to Life"_. I was speaking alongside experts from other companies working in the blockchain and crypto space, and an academic focused on applying the technology to government functions.
|
|
||||||
|
|
||||||
![Screenshot from the event](/media/blog/wales_tech_week.jpg)
|
|
||||||
|
|
||||||
It was a great opportunity to talk about how we are using blockchain at [Simply Do](https://www.simplydo.co.uk) to power some of our complex supply-chain processes, and to also hear about the exciting work from the other panellists.
|
|
||||||
|
|
||||||
The panel was excellently chaired by David Blake from the [Development Bank of Wales](https://developmentbank.wales). They are one of our investors and have been of great help and support to us over the past few years.
|
|
||||||
|
|
||||||
I look forward to continuing to watch the events over the next couple of days.
|
|
@ -1,47 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-24T21:48:00Z"
|
|
||||||
title: "Self-host your web searches with Whoogle"
|
|
||||||
description: "How you can use Whoogle to self-host your web-searching and avoid ads and tracking."
|
|
||||||
tags: [100daystooffload, technology, selfhost]
|
|
||||||
slug: whoogle
|
|
||||||
---
|
|
||||||
|
|
||||||
## Google and DuckDuckGo
|
|
||||||
|
|
||||||
It's common knowledge that part of Google's business model is to use the data it knows about you, your searches, and browsing patterns in order to more effectively serve ads.
|
|
||||||
|
|
||||||
Many people feel uncomfortable with this and so there is a strong movement to adopt more privacy-focused options, such as [DuckDuckGo](https://duckduckgo.com). This was my position, too. For a few years I've been a solid DuckDuckGo user, and it was my default on Mac and mobile devices.
|
|
||||||
|
|
||||||
However, I do find that for more technical queries - e.g. for specific parts of an API's documentation - it doesn't perform as well as Google. DuckDuckGo supports using [bangs](https://duckduckgo.com/bang) for automatically forwarding searches to another service. For example, prepending a search with `!g` will forward the query to Google instead.
|
|
||||||
|
|
||||||
As time went by, I found myself using `!g` more and more - for both technical and non-technical searches. It got to the point where I was just `!g`-ing everything. And so I wondered what the point was in proxying through DuckDuckGo at all.
|
|
||||||
|
|
||||||
## Self-hosting Whoogle
|
|
||||||
|
|
||||||
Not long ago I saw a link to [Whoogle Search](https://github.com/benbusby/whoogle-search) - a self-hosted open-source Google search replacement.
|
|
||||||
|
|
||||||
Whoogle does not display ads, doesn't rely on lots of JavaScript, and still returns great results. As long as it's hosted somewhere in the cloud then there is no reason for Google to be able to track you, either.
|
|
||||||
|
|
||||||
Getting it up and running on one of my servers was super easy. I use Docker to deploy services and so after pointing a subdomain to the server, setting up the needed certificates, and adding a virtual host to my `nginx` container, all I needed to do to get Whoogle running was to pull and run the container:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
docker run -d -p 5000:5000 benbusby/whoogle-search:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
I could then browse to the server and begin searching.
|
|
||||||
|
|
||||||
There are lots of ways the deployment can be tweaked, including the interface and security settings, so be sure to take a look at the documentation for [more options](https://github.com/benbusby/whoogle-search).
|
|
||||||
|
|
||||||
## Setting Whoogle as Firefox's default search engine
|
|
||||||
|
|
||||||
The main way I search the web is to simply type my query into the address bar in Firefox. To make the best use of Whoogle I needed to configure Firefox to use Whoogle as my default search provider.
|
|
||||||
|
|
||||||
There is no direct option for this in the Firefox settings, however Whoogle complies with the [OpenSearch standard](https://en.wikipedia.org/wiki/OpenSearch). This means that if you visit your self-hosted search-engine in Firefox you should be able to add it to Firefox by right-clicking the address bar and selecting _Add "Whoogle Search"_.
|
|
||||||
|
|
||||||
![Adding Whoogle as a search engine](/media/blog/whoogle1.png)
|
|
||||||
|
|
||||||
Once Whoogle is added to Firefox it can be set as the default search engine through the standard Firefox settings.
|
|
||||||
|
|
||||||
On mobile Firefox - which is even better now that iOS allows for selecting a default browser! - the process is slightly different. Simply tap _Add search engine_ in the app's settings, and in the URL box enter `https://server.address/search?q=%s` (replacing with your server's address) to add your Whoogle server as your search engine.
|
|
||||||
|
|
||||||
If you're someone that enjoys self-hosting things, then I recommend giving Whoogle a try.
|
|
@ -1,135 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-06-30T21:14:00Z"
|
|
||||||
title: "Using Blurhash to create placeholders for your images"
|
|
||||||
description: "How to use Blurhash to show image previews on your apps and webapps."
|
|
||||||
tags: [100daystooffload, technology]
|
|
||||||
slug: blurhash
|
|
||||||
---
|
|
||||||
|
|
||||||
## Loading indicators
|
|
||||||
In user-facing software, loading indicators are extremely important to let your users know that something is happening. This is the same no matter whether your software is a CLI program, a GUI application, a web app - or anything else.
|
|
||||||
|
|
||||||
Without such indicators, users of your software may become frustrated, or assume the program has crashed, and try to close it or leave.
|
|
||||||
|
|
||||||
Generally speaking, developers should try to keep long-running tasks to a minimum (or even offload them to a cron-job or an asynchronous server-side worker). However, in some cases this is not possible. For example, in a cloud-based file storage solution, in which uploads and downloads are a core and direct user-facing feature, the user must wait until a bunch of files finish uploading - though any post-processing can of course still occur in the background afterwards.
|
|
||||||
|
|
||||||
A loading indicator can be as simple as a `Loading...` string (appropriate for CLI apps) or perhaps a spinner (appropriate for mobile and web apps). However there are lots of other interesting approaches to this too.
|
|
||||||
|
|
||||||
For graphical user interfaces - particularly on the web - using placeholder layouts (or "skeleton screens") is a great way to give the user an idea of what to expect in terms of _layout_ before the actual data finishes loading and rendering.
|
|
||||||
|
|
||||||
![Skeleton screen on LinkedIn](/media/blog/blurhash1.png)
|
|
||||||
|
|
||||||
## Loading text vs images
|
|
||||||
In a GUI or web app, text data is quick to load due to its smaller payload size. For example, when making a request to a webpage containing a mix of text and larger images, the text will load first and render and the user must then wait until the images finish downloading before they can be displayed.
|
|
||||||
|
|
||||||
This may cause the page to shift about once the images do load and get rendered on-screen - this is particularly annoying if the user has already begun reading and loses their place in the document as elements get re-positioned.
|
|
||||||
|
|
||||||
One method to avoid this is to show an image _preview_, which gets rendered at the same time as the text. The preview can then be "filled in" with the real image once it loads.
|
|
||||||
|
|
||||||
To get an image preview on-screen at the same time as the surrounding text and other components one can deliver a smaller version of the image alongside the rest of the data. For example, by generating and delivering an inline data URL of an image directly within the HTML returned from the server.
|
|
||||||
|
|
||||||
Modern web browsers are remarkably efficient at rendering this type of thing. However, it does require that you do some (probably) server-side image processing in order to derive a compressed version of the image before returning it in your HTTP response.
|
|
||||||
|
|
||||||
## Blurhash
|
|
||||||
A perhaps nicer way of accomplishing this is to use [Blurhash](https://github.com/woltapp/blurhash). This tool enables the derivation of a compact string representation of an image that can easily be stored alongside your other data - right in your database - and can easily be returned in API payloads.
|
|
||||||
|
|
||||||
Essentially, the library takes a "hash" of the image, which results in a short string. This string can then be decoded into a smaller image that can be rendered.
|
|
||||||
|
|
||||||
As an example, we can use this picture of my dog:
|
|
||||||
|
|
||||||
![Picture of a dog](/media/blog/blurhash2.jpg)
|
|
||||||
|
|
||||||
Using Blurhash to encode this image, the following string is returned: `URH_SPDl_HxZItM|Iqt7EQxrIpNI9uj?jboM`. When decoding this string back into an image we get something like the below:
|
|
||||||
|
|
||||||
![Blurhash'd picture of a dog](/media/blog/blurhash3.png)
|
|
||||||
|
|
||||||
It is clear that the second image is an approximation of the first. Given that the hash string is so short, downloading and rendering this as a placeholder before the full image itself downloads is quick and easy. It gives a nice preview of the image to the user for the short time it takes for the full image to load.
|
|
||||||
|
|
||||||
Blurhash offers additional ways to tweak the hashing algorithm (e.g. to make it more or less "detailed"), and has implementations for many different languages and frameworks - across desktop, mobile, and web.
|
|
||||||
|
|
||||||
I recommend checking out the [documentation](https://github.com/woltapp/blurhash) for more information on this.
|
|
||||||
|
|
||||||
## Using Blurhash in a JavaScript app
|
|
||||||
If you are writing a webapp, this section may help you get off the ground with Blurhash.
|
|
||||||
|
|
||||||
Use of this library involves two steps; deriving the string "hash", and then rendering this string as an image placeholder. To get started add [blurhash](https://www.npmjs.com/package/blurhash) to your project (e.g. with `yarn add blurhash`).
|
|
||||||
|
|
||||||
First of all we'll look at getting the Blurhash string from an image. For this, we can do the processing browser-side to save on server resources. Assuming the user has just selected an image file for upload, the approach relies on drawing the specified image file to a hidden canvas that can be used to take the hash. The JavaScript code below illustrates this process.
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
import { encode } from 'blurhash';
|
|
||||||
|
|
||||||
const getHash = file => {
|
|
||||||
const canvas = document.createElement('canvas');
|
|
||||||
const context = canvas.getContext('2d');
|
|
||||||
const image = new Image();
|
|
||||||
return new Promise((resolve, reject) => {
|
|
||||||
image.onload = () => {
|
|
||||||
canvas.width = image.width;
|
|
||||||
canvas.height = image.height;
|
|
||||||
context.drawImage(image, 0, 0);
|
|
||||||
const imageData = context.getImageData(0, 0, image.width, image.height);
|
|
||||||
resolve(encode(imageData.data, imageData.width, imageData.height, 4, 4));
|
|
||||||
}
|
|
||||||
image.src = URL.createObjectURL(file);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
// Elsewhere in your code (e.g. once the user selects a file)
|
|
||||||
const hash = await getHash(file);
|
|
||||||
```
|
|
||||||
|
|
||||||
The `hash` variable can then be sent up to your API and stored safely in your database.
|
|
||||||
|
|
||||||
To later render the Blurhash string, it can be returned directly from your API along with other relevant information and then decoded into an image.
|
|
||||||
|
|
||||||
Below is a React component - `BlurrableImage` - I use to render an image's Blurhash whilst it loads in the background. Once loaded, the image gets rendered in place of the Blurhash.
|
|
||||||
|
|
||||||
```javascript
|
|
||||||
import React, { useState, useEffect, useRef } from 'react';
|
|
||||||
import { decode } from 'blurhash';
|
|
||||||
|
|
||||||
function BlurrableImage({ src, blurHash }) {
|
|
||||||
const [loaded, setLoaded] = useState(false);
|
|
||||||
const canvasRef = useRef();
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
const canvas = canvasRef.current;
|
|
||||||
const context = canvas.getContext('2d');
|
|
||||||
const imageData = context.createImageData(200, 200);
|
|
||||||
const decodedHash = decode(blurHash, 200, 200);
|
|
||||||
imageData.data.set(decodedHash);
|
|
||||||
context.putImageData(imageData, 0, 0);
|
|
||||||
}, [blurHash]);
|
|
||||||
|
|
||||||
return (<>
|
|
||||||
{/* This image will never display. We just use it so we know when the browser has downloaded it. */}
|
|
||||||
<img
|
|
||||||
src={src}
|
|
||||||
onLoad={e => setLoaded(true)}
|
|
||||||
style={{ display: 'none' }}
|
|
||||||
/>
|
|
||||||
|
|
||||||
{/* When the image has been downloaded, we can render it. E.g. here we use it as a background image. */}
|
|
||||||
{loaded &&
|
|
||||||
<div style={{
|
|
||||||
width: 200, height: 200,
|
|
||||||
backgroundSize: 'cover',
|
|
||||||
backgroundPosition: 'center',
|
|
||||||
backgroundImage: `url(${src})`
|
|
||||||
}} />
|
|
||||||
}
|
|
||||||
|
|
||||||
{/* We only show this canvas while loaded == false */}
|
|
||||||
<canvas
|
|
||||||
width={200} height={200}
|
|
||||||
ref={canvasRef}
|
|
||||||
style={{ display: !loaded ? 'block' : 'none' }}
|
|
||||||
/>
|
|
||||||
</>);
|
|
||||||
}
|
|
||||||
|
|
||||||
export default BlurrableImage;
|
|
||||||
```
|
|
||||||
|
|
||||||
If you display images in your application, Blurhash might offer a good solution for keeping your interfaces speedy and intuitive.
|
|
@ -1,25 +0,0 @@
|
|||||||
---
|
|
||||||
date: "2021-07-05T19:45:00Z"
|
|
||||||
title: "Project Hail Mary by Andy Weir"
|
|
||||||
description: "My thoughts on the book Project Hail Mary by Andy Weir."
|
|
||||||
tags: [100daystooffload, book]
|
|
||||||
slug: project-hail-mary
|
|
||||||
---
|
|
||||||
|
|
||||||
[Andy Weir](https://www.goodreads.com/author/show/6540057.Andy_Weir) has become renowned over the past decade for his science fiction novels. [_The Martian_](https://www.goodreads.com/book/show/18007564-the-martian) (and its movie) was hugely enjoyable and successful. I wasn't so keen on [_Artemis_](https://www.goodreads.com/book/show/34928122-artemis), but still did enjoy the excitement of the story.
|
|
||||||
|
|
||||||
I thought his latest book - [_Project Hail Mary_](https://www.goodreads.com/book/show/54493401-project-hail-mary) - was fantastic.
|
|
||||||
|
|
||||||
![Project Hail Mary book cover](/media/blog/project_hail_mary.jpg)
|
|
||||||
|
|
||||||
The story opens with a lone astronaut waking up in a spaceship that he has no memory of. He doesn't know where he is, _who_ he is, or how he got there. Although he works out that he is of pivotal importance to the survival of the human race, the story cleverly keeps you guessing about what might come next right to the end.
|
|
||||||
|
|
||||||
Whilst perhaps the story is a little more far-fetched than his other novels - particularly _The Martian_ - it still sits very much within the realm of possibility when compared to most other science fiction stories. It's set in the - more or less - present day, and, as always, everything is explainable by the author using maths and science.
|
|
||||||
|
|
||||||
This helps to make everything still feel "real" and relatable. The characters are all great, and each with their own quirks. I enjoyed how the story switches back and forth between the past and present in order to explain current events as the story continues to unfold. You build a bond with the astronaut as you learn things about his own past and the present developing situation together as the story progresses.
|
|
||||||
|
|
||||||
My only wish was for the story to be longer! I felt that the ending was perhaps a little rushed and wasn't as satisfying as I'd hoped it would be.
|
|
||||||
|
|
||||||
Interestingly I also recently read [_We Are Legion (We Are Bob)_](https://www.goodreads.com/book/show/32109569-we-are-legion-we-are-bob) by [Dennis E. Taylor](https://www.goodreads.com/author/show/12130438.Dennis_E_Taylor). Published about five years ago, it follows a vaguely similar storyline with the main character leaving Earth on a mission to save humanity. The Audible versions of both books are narrated by the same person - [Ray Porter](https://en.wikipedia.org/wiki/Ray_Porter) - who is fantastic at injecting even more energy and excitement into the stories.
|
|
||||||
|
|
||||||
_Project Hail Mary_ and _We Are Legion_ are relatively short reads (or listens), and so I can definitely recommend them both if you're in the market for new fiction books.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user