Browse Source

Initial commit

master
PapaTutuWawa 2 years ago
commit
8b2cdc1bf9
  1. 3
      .gitignore
  2. 16
      Makefile
  3. 44
      _config.yml
  4. 0
      _drafts/.gitkeep
  5. 7
      _includes/footer.html
  6. 68
      _includes/head.html
  7. 24
      _layouts/default.html
  8. 10
      _layouts/post.html
  9. 108
      _posts/2019-06-08-How-I-Play-Games.md
  10. 317
      _posts/2019-07-01-Mainline-Hero.md
  11. 167
      _posts/2019-08-21-Mainline-Hero-1.md
  12. 160
      _posts/2019-10-06-Road-to-Foss.md
  13. 105
      _posts/2020-01-03-Selfhosting-Lessons.md
  14. 215
      _posts/2020-02-13-Running-Prosody-traefik.md
  15. 33
      about.html
  16. 11
      archive.html
  17. 94
      assets/css/index.css
  18. BIN
      assets/fonts/Roboto-Regular.ttf
  19. BIN
      assets/img/as-long-as-it-works.jpg
  20. BIN
      assets/img/blog-umatrix.jpg
  21. BIN
      assets/img/profile.jpg
  22. BIN
      assets/img/raw/as-long-as-it-works.jpg
  23. BIN
      assets/img/raw/blog-umatrix.jpg
  24. BIN
      assets/img/raw/profile.jpg
  25. BIN
      assets/img/raw/serial-cable.jpg
  26. BIN
      assets/img/serial-cable.jpg
  27. 27
      atom.xml
  28. 45
      index.html
  29. 2
      robots.txt

3
.gitignore

@ -0,0 +1,3 @@
# Build artifacts
_site/
.jekyll-cache/

16
Makefile

@ -0,0 +1,16 @@
# Jekyll lives in the user gem path; override with `make JEKYLL=...`.
JEKYLL := ~/.gem/ruby/2.6.0/bin/jekyll

# None of these targets produce a file with the target's name.
.PHONY: img build serve

# Re-encode every raw image into a smaller, web-optimised JPEG
# under assets/img/ (strips metadata, 75% quality, progressive).
img: assets/img/raw/*
	for file in assets/img/raw/* ; do \
		basename=`basename $$file`; \
		convert assets/img/raw/$$basename -sampling-factor 4:2:0 -strip -quality 75 -interlace JPEG -colorspace RGB assets/img/$$basename ; \
	done

# Full clean build of the site, packed into a deployable tarball.
build: img
	$(RM) -r _site
	$(JEKYLL) build
	tar -czf blog.tar.gz _site

# Local preview server, including drafts and future-dated posts.
serve: img
	$(JEKYLL) serve \
		--drafts \
		--unpublished \
		--future

44
_config.yml

@ -0,0 +1,44 @@
title: PapaTutuWawa's Blog

# Social/contact links rendered by the templates.
social:
  blog: blog.polynom.me
  mastodon: fosstodon.org/@polynomdivision
  git: git.polynom.me/PapaTutuWawa
  email:
    address: papatutuwawa
    domain: polynom.me
  gpg: https://pki.polynom.me/pubkeys/papatutuwawa.pub

author:
  name: PapaTutuWawa
  # Deliberately obfuscated to deter address scrapers.
  email: papatutuwawa ["a" with a weird circle] polynom.me

source: .
destination: ./_site
permalink: /:title

# Apply the default layout everywhere, and the post layout to posts.
defaults:
  - scope:
      path: ""
    values:
      layout: default
  - scope:
      path: ""
      type: "posts"
    values:
      layout: post

plugins:
  - jekyll-paginate
  - jekyll-seo-tag
  - jekyll-sitemap

# jekyll-paginate
paginate: 5
paginate_path: "/page/:num/"

# jekyll-seo-tag
url: "https://blog.polynom.me"
description: "PapaTutuWawa's Blog. Mainly tech stuff."

# Raw images and the deployment tarball must not end up in _site.
exclude: ["assets/img/raw/", "blog.tar.gz"]

0
_drafts/.gitkeep

7
_includes/footer.html

@ -0,0 +1,7 @@
<!-- Global page footer; pulled into the bottom of the default layout. -->
<footer>
<center>
Created by <i>PapaTutuWawa</i> with &lt;3 using
<a href="https://github.com/google/roboto">Roboto</a> and
<a href="https://github.com/jekyll/jekyll">jekyll</a>
</center>
</footer>

68
_includes/head.html

@ -0,0 +1,68 @@
<head>
<!-- The charset declaration must appear within the first 1024 bytes of the
     document, so it (and the viewport) comes before the title and the large
     signature blob below. -->
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1">
<title>{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}</title>
{% seo %}
<!-- PGP message embedded as page metadata; kept verbatim. -->
<meta name="signature" content="-----BEGIN PGP MESSAGE-----
owGtWAtwVNUZjogjWQtYQGacUed40Wjt7t7sJiCEzfJQY6KAMYECOgXP7j279yT3
3nO999xdVoQRdTQ+GLRTKaAF0dqqM2odX32oQNVx8FVHnYH6rGKnTaVUp1ocLdL/
P/dustEElSEzyd495/zP7/v//9zcMvboujFHbbjaXjt98sU7jnpxb65u2co+J2NK
28rGCPxkTEaN6FFyabFsJ3XpwkAGi2mZnuGTuZYoZvRwKxaeOzWRIHNZkTvkAtZb
sSzSfd5FRNIiKaWT05IpkkhkY8Mo++CqjaE6ciUZdj1J5lPuWBUiWd4kvgwKheSA
6YzNJCUOtVmrVmQO86gUnkbywpHMka1a5EqpOdmYbNSIXpVwPeEyT1ZaNVFsUbpq
hIZ1o0Y4NEcDaQ6xVSs2kilL5OkQW8xZvqj7G7oN5uc97kounG/za7j0jGT9iKi1
uNNLPGa1annqCIdDQBoxPVZo1Uwp3RZdV0GawpctzY2NjfpI3gSeVePF95P1uWTL
MVffDbZBnx22Qh7aXZcWmZ6OLIf5IrLiIuKua0G4mD7dMn7c40Mas7GV2mzc1lq0
xSzXDX5pcQ3LB2zi4vBexav0aRkU72QeaoxrKq6hktqq+BBSjKB3WNTiGma6ZaQM
D2uv1tPZKsOQtlCFDzr8vMlsmhReUVuV0UO3IFvYAs5zjK83AKz8qEkoJPMm9XwG
iC1a2JaYrhI9uBkWQImzsis8WQNvmRvSbDVYiedZQn2JE+5wyamV8LGqWlPJxjix
6QpuB3Z1SYt0DzLAlxWL+SZjAzzQqQ/u+Hre93XuGGxFEp5C/PWwEYY6csKoZGP1
GTOVzeTBJ+ZlM7SqQ8u6kD8J+StD/mabwmYtq0/L6DSb0auHdZBE+SaSt8Bmq6Y6
T8IPctqAxlxlSBusFW4CR+ozBi9VxTE3gDfzyIAi/M1RjwC3+BW4bUEG6pX7A67S
nAhkEru9VtUUSiUwS+AKWJqDZ5RNDOEbGry8yUvs23SEp0bWIoWdXHEoDV3d3TXS
GR1iHzEHA3HWbJagYagGBXuQ9/RAlrsYPpBOKAO/NsVpZGqtBhdOgEPwByrbrrIJ
XRoII92YbtQbm/T0VL2dWpZILGZWlN6sWiC4EDJBYahUuNlMLttQlDNJeipUrUdQ
TUbPwSF34MwQL/zAtqlXIZcHApoM5sPNLqD4UyOCT9U8HVYcaT3VpHcFjsOdYqLT
Ez6QPiE9ygq8Nwoq2iXRLhEOJNKTpLm5CW4AJpQQiQRGDjrVRNpY7vCCzjA7u3De
zHO7WshCk/sEDxL4VLwmXuSdG3lX5tIk7QsXdnYTn3nYPXySE7AGbruR20nSUSAV
EcAaNM889VikTJoMSqkcxweH9AS2S6TI6OAAzAbpCaeYPUc4Pjfw5gGNGcgULRP0
kkIqPOggpOAJG3V4LKmkjyRiKSRfN7MK2NQRtXnMh0HiR2hF38g8Rj2HGaQNXcHj
ifbw/MggNTaRC6hzeCB1iaBoQjIZL5qS2FClJiBUFHFC83nhGQiRFMSukMhtBKyE
iYyTDuK7gUMCF7d/Mp+UTZ43Yx0k8BnKSATdoBV89jEQ1IBHYbDIOD7YlFvqocgl
QRB8AdAmyZwQ0RxeU5EkaArGcIAuShCOwfGyCCyDYN2Wic1CqoChIgNNZTCqoMTL
iMdhXkHUvijIsqIMEl8QCxNN5nEnWEGoYcOMAkoodiTJYpNbLAaOFbgHPhcFtUiZ
+mBIKqIi56hTQf05i9m+Ih4ECV3OgAgY+EuhqbISAAk3bfDHhyWDG8QRKAndjsMU
QpYdGYKlZugp4Ng0vUtQIyFFok34VWbhUrrtou5ukiDzK+QCEQDBFCqdHi/RfIXA
MPuOTJtGLspLgva+N9PUqFLJqUDmQ451AAUMRlyLAvPBIYNLuBDZmFpRUDkdgmC1
LyAHiOKZEYP2QIFlHDicoz62F1XMoDLPUIMddhYVP7VgfjiAcYn5SbIAmMPDjhS6
ZAEiHsrHyog/EhPM97C8OoTIGeA+eg3NwgKDCOjlAcACGiwhjySajdP1dEpX10SY
mol25olEKgK0ukpwFa4h0BzhvY20Ka7OkaDXlT4WEbb7Q+KZTpE5QfHw8OxwFEBh
hajeHkEGMHHmq/YAPRkqoIxwcacgPFvVF+wUqeqyhkJL7UuP53t9aOoO1H0ILRKC
5cA/YCz2ijBoNQwiU9i8QY8AXFW/Jr2AL7OiAgY7VtRNFT4wGMDLHKLKXOphMwOf
YqiI5mUANe6GCUPfsdxVsUKBIzXDEwC66jDgqCWgzVFF25wQqjWipqoHHVGHMmmJ
xSzey1TTKXuKLaoUyiGLkUSwo+ZQZCOMBbWC47gxNbkkuSTTXxMijkU0F1604yQX
YIp9iVMMTHO410sGf2BaxgZnZE9U+x1wX7ddi0n0ygx8UqDeSOwdyt/ae9zXr3FD
L7NDWE+hkyvsq9e/6BLou4D34CEWVUWDRYFqM2FKw34k8W0yqeFPD5TVwAvjN0XT
g9fe7yXX4EVu0oGooqyFWVSPA3msr1kcei58Y4neXjy9+lgAAsB9N/yCl9/tY/Br
/TnQfhA5YHKGZ4e+hvBs2O8aLDmzCcoIaAnGaM2rNL4Ywrw1g1wSSKAXhShaTIdB
JqTAYYGfGJFqhIcU7VHvj9GHlg0/VTLCsKL7OmSgGgm+qan/Xt0wqml03VFj6k6e
fOrorR8HVr6pcizf01Bf/W/XMaPwX111sfrjqysPTB1/4MS7ll5xUmHSssdOf2FC
acuHz81ytNybR0/cfOtnnbeNu+rh1/cVxv/ozI9yux/dfNqSf+w6bVaqWfzgTw/f
uv3+1XWTTi6uPHbfDadM2HTtA5uMxaN2TuxbPfn4Z55596nFXzi7Pp178BfNr6dT
r9z50erEw/unXPuQ82tjHJ8wdusnX176htl39Iy2c3f8atvza55cMn3X43f9Zb2c
9WHf+9ccOGPSjqfvO6HvrY1fffXg+1fu27577Esv82WrbjxlYv+D9/Znbj5h2x/m
vjX/7TXxT7dfv3ftnPf+vmy8/vHcdbs2Xrf7nuuSz055Z/ryvf959oVTjr3k4tEn
GU3v/mzs+HG7b3tv/7Mn3/hZ+rW+C/N/W3Tm6PvunnDdL2//3193zpj9+5sylfUN
7J+BduGVBy95JL5zyobTf7q0rf79j5+Y/lbfjQ/VvfHkFmvp1bvf+fLVP+5Z+9I0
96a11yyYu7S93GG+vOqRn9/weNOip0c11x+3c9PFyfopa+422s97cEf/vzdcc7Ox
Z/8Z/W7/9Q1b3zTL607d8+iBWa8s2Xi5vfqN18wTx91/6fiJZN35B5t3aL9bs96a
OObTs686Ts91nc/5vfccYy25d1zpsjFvr9++7ZOez1955tXPe/sf/83TZmXl8uS/
trZPfV0XL551Vufte9fNmPTbPZuNO2aT3fVrn19351Mf3PHEf0/YYIltJ80rtcj4
vof29a7f333Z7A97Fhz/w3VffPXZn7e0Nq6Z/NjB584+MHbLhv8D
=is0F
-----END PGP MESSAGE-----" />
<link rel="stylesheet" href="/assets/css/index.css" />
</head>

24
_layouts/default.html

@ -0,0 +1,24 @@
<!DOCTYPE html>
<html lang="en">
{% include head.html %}
<body>
<h1><center><a href="/">papatutuwawa@home:~$</a></center></h1>
<h3 class="title-sub"><center>by PapaTutuWawa</center></h3>
<div class="container title-sub-subbar horizontal">
<a href="/about.html" class="subbar-link"><h3>About</h3></a>
<a href="/archive.html" class="subbar-link"><h3>Archive</h3></a>
<a href="/atom.xml" class="subbar-link"><h3>RSS</h3></a>
</div>
<div class="container">
<div class="vertical">
{{ content }}
</div>
</div>
<!-- The footer must live inside <body>: flow content between </body> and
     </html> is invalid HTML and gets reparented by browsers. -->
<br/>
{% include footer.html %}
</body>
</html>

10
_layouts/post.html

@ -0,0 +1,10 @@
---
layout: default
---
<!-- Post layout: wraps a post's rendered Markdown in an <article>
     headed by the post's title, inside the default site chrome. -->
<div class="post">
<article>
<h1>{{ page.title }}</h1>
{{ content }}
</article>
</div>

108
_posts/2019-06-08-How-I-Play-Games.md

@ -0,0 +1,108 @@
---
title: How I Play Games on My Linux PC
published: true
---
I love Linux. In fact, I love it so much that it runs on every computer I use, except for my phone but that
can be changed. It always amazes me how much control Linux gives me about my computer and how easy it is
to create a script that just does everything that I was doing manually before.
Since September of 2018, I decided to stop dual booting Windows and Linux and only use Linux. I mean, I could
play my most played games under Linux: *CS:GO, Split/Second Velocity (Wine), NieR: Automata (Wine).* But there
were still some games that I could not play as either have no Linux port or refuse to run with Wine. I love
playing *Tom Clancy's The Division* and *The Division 2*. I really enjoyed playing *Tom Clancy's Rainbow Six Siege* and
*Wildlands* was much fun. Except for *The Division*, none of these games runs under Wine. So what do?
# GPU Passthrough
Before even having the thought of switching to Linux "full-time", I stumbled across [this video](https://invidio.us/watch?v=16dbAUrtMX4) by Level1Linux.
It introduced me to the concept of hardware passthrough and I wanted to do it ever since. Now that my mainboard
has an IOMMU and my CPU supports all needed virtualization extensions, I was ready.
At that time I was using a AMD Ryzen 2400G and a Nvidia Geforce GTX 1060. I chose this particular CPU
as it contains an iGPU, allowing me to have video output of my host even when I pass the 1060 through
to my VM.
<!-- There are many great tutorials out there that teach you to do this thing but I was amazed at how well -->
<!-- the games run. It should have come as no surprise but it still did. -->
The only thing that I did not like was the fact that the Nvidia driver refuses to run in a Virtual Machine, so
I had to configure my VM via libvirt in a way that hides the fact that the driver is run inside a VM.
# Dynamic GPU Passthrough
While this allowed me to play *The Division*, it was tedious to reboot to not have the GPU bound to the
vfio-pci module so that I could use it on my host. Most guides expect you to have a second powerful GPU
so that you don't have to worry about the unavailable GPU but to me it seemed like a waste.
So I wrote myself a script which...
- unloaded all Nvidia kernel modules;
- started libvirt and loaded the vfio-pci module;
- bound the GPU to the vfio-pci module;
- started the VM.
The only problem with this was that the Nvidia modules kept being loaded by the X server. This was annoying
since I had to blacklist the modules, which prevented me from using the GPU on my host. The solution, albeit
very hacky, was a custom package which installed the kernel modules into a new folder from where the modules
were manually inserted using `insmod` by another script.
My host's video output comes from my Ryzen's iGPU. It is not powerful enough to run games like *Split/Second Velocity*
or *CS:GO* at an acceptable framerate, so what do?
Since the Nvidia driver for Linux is proprietary [PRIME offloading](https://wiki.archlinux.org/index.php/PRIME#PRIME_GPU_offloading) was not an option. I, however, discovered
a library which allowed the offloading of an application's rendering - if it uses GLX - onto another GPU: [primus](https://github.com/amonakov/primus).
It worked well enough for games that used OpenGL, like *CS:GO*. But when I tried launching *Split/Second Velocity*
using Wine, it crashed. Vulkan offloading was not possible with primus, but with [primus_vk](https://github.com/felixdoerre/primus_vk). This library I never got to work so I cannot say anything about it.
The only solution to that, from my point-of-view, was to create another script which launched a second X server
on the Nvidia GPU, start Openbox as a WM on that X server and create a seamless transition from my iGPU- to my
Nvidia-X-server using [barrier](https://github.com/debauchee/barrier). I then could start applications like
Steam on the Nvidia X server and use the GPU's full potential.
Since I was using barrier for the second X server I tried doing the same with barrier inside my VM and all I can
say is that it works very well. It made the entire "workflow" with the VM much less painful as I could just take
control of the host if I ever needed to without the need for a second keyboard.
# GPU Changes
Today, my PC runs the same AMD CPU. However, the Nvidia GPU got replaced with an AMD RX 590. This allowed me to
use the opensource amdgpu driver, which was and still is a huge plus for me. It complicated some things for me
though.
While I can now use PRIME offloading on any application I want, I cannot simply unbind the RX 590 from the amdgpu
driver while in X for use in my VM. While the driver exposes this functionality, it crashes the kernel as soon
as I try to suspend or shutdown my computer.
The only solution for this is to blacklist the amdgpu module when starting the kernel, bind the GPU to the vfio-pci
driver and pass it through. Then I can load the amdgpu module again and have it attach itself to my iGPU. When I am
done with using the VM, I can re-attach the GPU to the amdgpu driver and use it there.
There are some issues with this entire setup though:
- sometimes after re-attaching, the GPU does not run with full speed. While I can normally play *CS:GO* with ~80 FPS, it can be as low as ~55 FPS after re-attachment.
- the GPU cannot be reset by the Linux kernel. This means that the GPU has to be disabled inside Windows before shutting down the VM. Otherwise, the amdgpu module cannot bind to the GPU which even crashed my kernel.
# Some Freezes
Ignoring the GPU issue, since around Linux kernel 4.1x I experienced another issue: My computer would sometimes freeze
up when opening *Steam*. In even newer versions, it even froze my PC when I gave my VM 10GB of RAM, but did not when
I gave my VM only 8GB.
By running htop with a really small refresh interval I was lucky to observe the probable cause of these freezes: The
kernel tried to swap as much as he could, thus making everything grind to a halt. The solution to this, even though
it *feels* hacky, is to just tell the kernel to swap less aggressively by setting `vm.swappiness` to either a much
lower value to swap later or to 0 to stop swapping.
# Audio
QEMU, which I used as libvirt's backend, allows you to "pass through" audio from inside the VM to your PulseAudio socket
on the host. This worked okay-ish at first, but now - presumably because something got updated inside QEMU - it
works well enough to play games. I get the occasional crackling but it is not distracting at all.
I also tried a software called [scream](https://github.com/duncanthrax/scream) which streamed the audio from a
virtual audio device inside the VM to the network. As the only network interface attached to my VM was going directly
to my host, I just set up the receiver application to listen only on this specific interface. This worked remarkably
well as I never heard any crackling.
The only issue that I had with scream was that, for some reason, *Tom Clancy's The Division 2* would crash every 5
minutes when I was using scream. Without it, *The Division 2* never crashed.
# Conclusion
My solutions are probably not the most elegant or the most practical but
![](/assets/img/as-long-as-it-works.jpg)

317
_posts/2019-07-01-Mainline-Hero.md

@ -0,0 +1,317 @@
---
title: Mainline Hero Part 0 - Modern Linux For My Galaxy S7
hashtag: mainlinehero
published: true
use_math: true
show_warning: true
---
Ever heard of [PostmarketOS](https://postmarketos.org/)? If not, then here's a short summary:
PostmarketOS aims to bring *"[a] real Linux distribution for phones and other mobile devices [...]"* to,
well, phones and other mobile devices.
Ever since reading about it, I've been intrigued by the idea of running a real Linux distro
with my UI of choice, be it *Plasma* or *Unity*, on my phone. Perhaps even running the device
without any proprietary firmware blobs. So, I tried my best at contributing to PostmarketOS, which
resulted in 3 MRs that have been accepted into master (Sorry for forgetting to bump the pkgver...).
With this series - if I manage to not break my phone - I want to document what I, someone
who has absolutely no idea what he is doing, learned about all this stuff, how I went about it
and what the results are.
## Mainline Hero #0 - Preparations
Before I can even think about trying to make mainline Linux run on my *Galaxy S7*, we should think
about how we can diagnose any issues that the kernel or the bootloader might have. And how do
professionals debug? Exactly! With **a lot** of `printf()` statements. But how can we retrieve those
from the device?
### Getting Output
While preparing myself for this task, I learned that there are a couple of ways.
One is called [*RAM console*](https://wiki.postmarketos.org/wiki/Mainlining_FAQ#Writing_dmesg_to_RAM_and_reading_it_out_after_reboot). What it does is just dump everything that the kernel prints into a
reserved region of memory, which can later be retrieved by reading from `/proc/last_kmsg` with a
downstream kernel.
The other one is via a [serial cable](https://wiki.postmarketos.org/wiki/Serial_debugging). This sounded
pretty difficult at first, the reason being that I have no idea about hardware, besides the occasional
**PC** hardware talk. I imagined a cable coming out of a box, packed to the brim with electronics
doing some black magic.
The reality is - thankfully - much simpler. It is, basically, just a normal USB cable. I mean: *USB* literally
stands for [*Universal Serial Bus*](https://en.wikipedia.org/wiki/USB). But how come my PC does not
read those kernel logs when I plug in my phone?
As it turns out, there is a component built into my phone which decides exactly what data flows from my
phone to the PC. Reading the [XDA post](https://forum.xda-developers.com/galaxy-s7/how-to/guide-samsung-galaxy-s7-uart-t3743895) which the PostmarketOS Wiki linked helped understand that my
device contains a *MUIC*, a chip which multiplexes the data lines of the USB cable towards different
"subsystems". As I later learned, the USB standard for connectors of type Micro Type B requires 5 pins:
power, ground, RX, TX and ID. Power and ground should be self-explanatory if you know anything
about electronics (I don't). RX and TX are the two data lines that USB uses. As USB is just a serial
connection, only **one** line is used for sending and one for receiving data. The ID line is the interesting
one: it tells the MUIC what subsystem it should multiplex the data lines to.
[Pinout diagram](https://web.archive.org/web/20190120234321/https://pinouts.ru/PortableDevices/micro_usb_pinout.shtml) of the Micro Type B connector:
```
_______________
/ \
| 1 2 3 4 5 |
+--|--|--|--|--|--+
| | | | +-o Ground
| | | +----o ID
| | +-------o D+ (Data)
| +----------o D- (Data)
+-------------o VCC (Power)
```
According to the XDA post, the MUIC switches to serial - used for dumping output of the bootloader and the
kernel - if it measures a resistance of 619kOhm attached to the ID pin. So, according to the diagram in the
post, I built a serial cable.
But how did the author of the XDA post know of the exact resistance that would tell the MUIC to switch to
serial? If you `grep` the
[*S7*'s defconfig](https://raw.githubusercontent.com/ivanmeler/android_kernel_samsung_herolte/lineage-15.1/arch/arm64/configs/exynos8890-herolte_defconfig),
for `MUIC`, then one of the results is the KConfig flag `CONFIG_MUIC_UNIVERSAL_MAX77854`.
If we then search the kernel tree for the keyword `max77854`, we find multiple files; one being
`drivers/mfd/max77854.c`. This file's copyright header tells us that we deal with a *Maxim 77854* chip. Judging
from the different files we find, it seems as if this chip is not only responsible for switching between serial
and regular USB, but also for e.g. charging (`drivers/battery_v2/include/charger/max77854_charger.h`).
However, the really interesting file is `drivers/muic/max77854.c`, since there we can find an array of structs
that contain strings. Sounds pretty normal until you look at the strings more closely: One of the strings is
the value `"Jig UART On"`:
```
[...]
#if defined(CONFIG_SEC_FACTORY)
{
.adc1k = 0x00,
.adcerr = 0x00,
.adc = ADC_JIG_UART_ON,
.vbvolt = VB_LOW,
.chgdetrun = CHGDETRUN_FALSE,
.chgtyp = CHGTYP_NO_VOLTAGE,
.control1 = CTRL1_UART,
.vps_name = "Jig UART On",
.attached_dev = ATTACHED_DEV_JIG_UART_ON_MUIC,
},
#endif /* CONFIG_SEC_FACTORY */
[...]
```
The keyword `ADC_JIG_UART_ON` seems especially interesting. Why? Well, the driver has to know what to do
with each measured resistance. It would make sense that we call the constant which contains the resistance
something like that. Additionally, it is the only constant name that does not immediately hint at its
value or function.
So we search the kernel source for this keyword. Most occurences are just
drivers using this constant. But one hit shows its definition: `include/linux/muic/muic.h`. There we
find on [line 106](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/b51cf88008606ebac535785ff549b9f55e5660b4/include/linux/muic/muic.h#L106)
a comment which states that this constant represents a resistance of 619kOhm.
To actually build the serial cable, we need to have a USB Type B male connector that we can solder our cables to.
My first thought was to buy a simple and cheap USB Type B cable, cut it, remove the isolation and solder my
connectors to it. I, however, failed to notice that the Type A part of the cable - the one you plug into e.g.
your PC - only has 4 pins, while the Type B part has 5. After stumbling upon some random diagram, I learned that
for regular USB connectivity, such as connecting your phone to your PC, the ID pin is not needed, so it is left
disconnected. As this plan failed, I proceeded to buy a USB Type B male connector. Since I bought it on the
Internet and the seller did not provide a diagram of what pad on the connector connects to what pin, I also
ordered a USB Type B female breakout board.
After all parts arrived, I used a digital multimeter to measure the resistance between each pad on the connector
and on the breakout board. Since I have no idea about electronics, let me explain: Resistance is defined as
$R = \frac{U}{I}$, where $R$ is the resistance, $U$ the voltage and $I$ the current. This means that we should
measure - practically speaking - infinite resistance when no current is flowing and some resistance $R \gt 0$
when we have a flowing current, meaning that we can test for continuity by attempting to measure resistance.
After some poking around, I got the following diagram:
```
+---------o VCC
| +-----o D+
| | +-o GND
___|___|___|___
/ ? ? ? \
| ? ? |
+------|---|------+
| +---o ID
+-------o D-
```
![The "Serial Cable"](/assets/img/serial-cable.jpg)
Since the data that the serial port inside the phone is coming in using a certain protocol, which also includes
timing, bit order and error correcting codes, we need something to convert this data into something that is
usable on the host. Since the USB specification for data may differ from what we actually receive, we can't just
connect the phone's D- and D+ lines to the host USB's D- and D+. Hence the need for a device which does this
conversion for us and also deals with the timing of the data: The tiny board to which all cables lead to
basically just contains an *FT232RL* chip from *FTDI*. It is what does all the conversion and timing magic.
Since I don't want to accidentally brick my phone by frying it with 3.3V or 5V - though I think that damaging
the hardware with 5V is pretty difficult - I did not connect the USB's 5V to the *FT232*'s VCC port.
Booting up the device, we start to see data being sent via serial!
```
[...]
CP Mailbox Debug
0x10540180 : 0xdca7b414 0x 804f99f
0x10540184 : 0xdeb36080 0x8112566f
0x10540188 : 0xf4bf0800 0x2534862d
0x1054018C : 0x61ff350e 0x1208fd27
0x10540190 : 0x17e60624 0x18121baf
0x105C0038 : 0x3bd58404 0x5674fb39
CP BL flow
0x10920014 : 0x79dab841 0x9b01b3fd
0x10800028 : 0xffbd34b1 0x9fd118cc
Resume el3 flow
EL3_VAL : 0xdcfee785 0xfbb6b0a2 0xccf99641
muic_register_max77854_apis
muic_is_max77854 chip_id:0x54 muic_id:0xb5 -> matched.
[MUIC] print_init_regs
INT:01 00 00 ST:1d 00 00 IM:00 00 00 CDET:2d 0c CTRL:1b 3b 09 b2 HVCT:00 00 LDO0:47
MUIC rev = MAX77854(181)
init_multi_microusb_ic Active MUIC 0xb5
[...]
```
Nice! We can see what *SBOOT*, the bootloader that *Samsung* uses, tells us. But for some reason, I wasn't
able to get into the *SBOOT* prompt to tell the kernel to dump everything via serial. While the XDA post
used the programm `minicom`, which I could use to get *SBOOT* output, it never seemed to send the carriage
returns while I was pressing the return key like crazy. So what I did was try to use a different tool to
interact with the serial converter: `picocom`. And it worked!
Although I set the kernel parameters to output to the TTY device `ttySAC4`, just like the XDA post said,
I did not receive any data.
### Device Tree
So we can just try and boot mainline on the phone then, yes? With a very high probability: no. The reason being
that the kernel has no idea about the actual hardware inside the phone.
This may seem weird as you don't have to tell your kernel about your shiny new GPU or about your RAM. The reason
is that your PC is designed to be modular: You can swap the CPU, the RAM and even the attached devices, like
your GPU. This means that on X86, the CPU is able to discover its hardware since there is only one bus for
attaching devices (ignoring RAM and the CPU): the PCI bus. How does the CPU know about its RAM?
The RAM-modules are swappable, which means that the CPU cannot anticipate just how much RAM you
have in your system. This information gets relayed, perhaps via the MMU, to the CPU.
Can't we just probe the available memory in an ARM SoC? Technically yes, but it would take a lot
of time if we have a modern 64 bit CPU. Moreover, how do you know that a probed memory location
is not a memory mapped device? Wouldn't it make sense to bake this data into the SoC then? Here
again: not really. The reason is that the SoCs are vendor specific. This means that the vendor
basically just buys the rights to put the CPU into their SoC. The rest is up to the vendor. They
can add as much RAM as they want, without the CPU designer having much input. This means that the
data must not be **hardcoded** into the CPU.
On ARM and probably most other microprocessors devices can be memory mapped, which means that they respond to
a certain region of memory being written to or read from. This makes auto-discovering devices quite difficult
as you would have to probe **a lot** of memory regions.
As an example: Imagine we can access 4 different locations in memory, each holding 1 byte of data. These regions
are at the memory addresses `0x1` to `0x4`. This means that we would have to probe 4 memory locations. Easy,
right?
Not exactly. We would have to probe 4 times to discover 4 possible memory mapped areas with a width of 1 byte.
If we allow a width of 2 bytes, then we would have to probe 3 different regions: `0x1`-`0x2`, `0x2`-`0x3` and
`0x3`-`0x4`.
This assumes that memory maps need to be directly next to each other. Otherwise we would need to use the
binomial coefficient.
This results in 10 (4x 1 byte, 3x 2 bytes, 2x 3 bytes and 1x 4 bytes) different probing attempts to discover
possible memory mapped devices. This does not seem much when we only have a 2 bit CPU, but in the case of the
*S7*, we have a 64 bit CPU; so we would have to probe about $\sum_{n=1}^{2^{64}} n$ times. This finite sum
is equal ([German Wikipedia](https://de.wikipedia.org/wiki/Gau%C3%9Fsche_Summenformel)) to
$\frac{1}{2} 2^{64} {(2^{64} + 1)} = 1.7014 \cdot 10^{38}$. Quite a lot! Keep in mind that this
calculation does not factor in any other busses that the SoC might use; they can, probably, use their own
address space.
So, long story short: We need to tell the kernel about all the hardware beforehand. This is where the so-called
Device Tree comes into play. It is a structured way of describing the attached hardware. You can find examples
in the kernel tree under `arch/arm{,64}/boot/dts/`. The problem that arises for my phone is that it
uses the Exynos SoC from Samsung. While Exynos 7 or older would just require an addition to the already existing
Device Tree files, the *S7* uses the Exynos 8890 SoC. This one is not in mainline, which means that it is
required to port it from the [downstream kernel](https://github.com/ivanmeler/android_kernel_samsung_universal8890/) into mainline.
### Device Support
The challenge that follows, provided I don't brick my phone, is the kernel support for the SoC's hardware.
#### GPU
The GPU of the Exynos 8890 SoC is a Mali-T880 from ARM. While there is no "official" FOSS-driver for it, one
is in development: [Panfrost](https://gitlab.freedesktop.org/panfrost/linux). One of the developers once
mentioned in PostmarketOS' Matrix channel that the driver is not ready for day-to-day use. But hopefully it
will be in the foreseeable future.
#### Wifi
While I found no data on the Exynos 8890's Wifi-chip, I managed to allow the downstream kernel to use it, albeit
with its proprietary firmware ([MR](https://gitlab.com/postmarketOS/pmaports/merge_requests/309)).
This patch requires a patch which changes the path of the firmware in the file `drivers/net/wireless/bcmdhd4359/dhd.h`.
The license header of [said file](https://github.com/ivanmeler/android_kernel_samsung_universal8890/blob/lineage-15.0/drivers/net/wireless/bcmdhd4359/dhd.h)
hints at a chip from Broadcom. The model of the chip appears to be 4359. What *dhd* stands for? I don't know.
Looking at the compatibility of the [kernel modules](https://wireless.wiki.kernel.org/en/users/drivers/brcm80211) for Broadcom wireless chips, we can find
that the *BCM4359* chip is compatible. But is that the same as the module folder's name specifies? Again, I don't know.
Hopefully it is...
#### Other Components
At the time of writing this post, it has been a "long time" since I last flashed PostmarketOS on
my phone to look at what the kernel is saying. All of this device data I gathered by looking at
spec sheets by Samsung or the kernel. So I don't really know what other hardware is inside my
*S7*.
## Next Steps
The next steps are actually testing things out and playing around with values and settings and all kinds of things.
## Other Devices I Have Lying Around
This may be off-topic for the "*Mainline Hero*" series but I recently tried to find out whether another device
I have lying around - a *Samsung Galaxy Note 8.0* - also uses such a MUIC to multiplex its USB port. While
at first I somehow found out, which I now know is wrong, that the *Note 8.0* uses the same *Maxim 77854* as my
*S7*, I discovered that the *Note 8.0* does use a MUIC, just not the *77854*. Since I found no other links
talking about this, I am not sure until I test it, but what I will do is tell you about how I reached this
conclusion!
If you `grep` the [defconfig for the herolte](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/arch/arm64/configs/exynos8890-herolte_defconfig) for
"*77854*", then one of the results is the flag `CONFIG_MUIC_UNIVERSAL_MAX77854`. The prefix `CONFIG_MUIC` makes
sense since this enables kernel support for the *Maxim 77854* **MUIC**. As such, we should be able to find
an enabled MUIC in the *Note 8.0*'s [defconfig](https://github.com/LineageOS/android_kernel_samsung_smdk4412/blob/lineage-16.0/arch/arm/configs/lineageos_n5110_defconfig).
If we grep for `CONFIG_MUIC`, then we indeed get results. While the results do not look like the one for
the *77854*, we get ones like `CONFIG_MUIC_MAX77693_SUPPORT_OTG_AUDIO_DOCK`. This indicates that the *Note 8.0*
has a *Maxim 77693* MUIC built in. But it's not a very strong indicator. Since the [kernel source](https://github.com/LineageOS/android_kernel_samsung_smdk4412/) is available
on Github, we can just search the repo for the keyword "*MAX77693*". One of the results hints at the file
`drivers/misc/max77693-muic.c`. Looking at the Makefile of the `drivers/misc` directory, we find that this
source file is only compiled with the KConfig flag `CONFIG_MFD_MAX77693`. Grepping the *Note 8.0*'s defconfig
for this flag yields the result that this kernel module is enabled, hence hinting at the existence of a MUIC
in the *Note 8.0*.
If we take a closer look at the source file at `drivers/misc/max77693-muic.c`, we can find an interesting part
at [line 102](https://github.com/LineageOS/android_kernel_samsung_smdk4412/blob/b7ffe7f2aea2391737cdeac2a33217ee0ea4f2ba/drivers/misc/max77693-muic.c#L102):
```
[...]
ADC_JIG_UART_ON = 0x1d, /* 0x11101 619K ohm */
[...]
```
This means that, as the *Maxim 77854* requires a 619kOhm resistor to enable UART, we can debug
the *Note 8.0* with the same serial cable as the *S7*.
Plugging it into the DIY serial cable and booting it up, we also get some output:
```
[...]
BUCK1OUT(vdd_mif) = 0x05
BUCK3DVS1(vdd_int) = 0x20
cardtype: 0x00000007
SB_MMC_HS_52MHZ_1_8V_3V_IO
mmc->card_caps: 0x00000311
mmc->host_caps: 0x00000311
[mmc] capacity = 30777344
```
Theory proven! We **can** also serial debug the *Note 8.0* using the same cable.
## Some Closing Words
I want to emphasize that just very few of the things I mentioned were discovered or implemented by me. I just collected
all this information to tell you about what I learned. The only thing that I can truly say I discovered is the MR for
the Wifi firmware...
Additionally, I want to make it clear that I have no idea about microelectronics, electronics or ARM in general. All the
things I wrote that are about ARM or electronic - especially everything in the *Device Tree* section - is pure speculation
on my side. I never really looked into these things, but all the statements I made make sense to me. You can't just probe
$2^{64}$ different memory addresses just to figure out how much RAM you have, can you?

167
_posts/2019-08-21-Mainline-Hero-1.md

@ -0,0 +1,167 @@
---
title: Mainline Hero Part 1 - First Attempts At Porting
hashtag: mainlinehero
published: true
---
In the first post of the series, I showed what information I gathered and what tricks can be used
to debug our mainline port of the *herolte* kernel. While I learned a lot just by preparing for
the actual porting, I was not able to actually get as close as to booting the kernel. I would have
liked to write about what I did to *actually* boot a *5.X.X* kernel on the device, but instead I will tell you
about the journey I completed thus far.
If you are curious about the progress I made, you can find the patches [here]({{ site.social.git }}/herolte-mainline). The first patches I produced are in the `patches/` directory, while the ones I created with lower
expectations are in the `patches_v2/` directory. Both "patchsets" are based on the `linux-next` source.
## Starting Out
My initial expectations about mainlining were simple: *The kernel should at least boot and then perhaps
crash in some way I can debug*.
This, however, was my first mistake: Nothing is that easy! Ignoring this, I immediately began writing
up a *Device Tree* based on the original downstream source. This was the first big challenge as the amount of
downstream *Device Tree* files is overwhelming:
```
$ wc -l exynos* | awk -F\ '{print $1}' | awk '{sum += $1} END {print sum}'
54952
```
But I chewed through most of them by just looking for interesting nodes like `cpu` or `memory`, after which
I transferred them into a new simple *Device Tree*. At this point I learned that the *Github* search does not
work as well as I thought it does. It **does** find what I searched for. But only sometimes. So how do we find
what we are looking for? By *grep*-ping through the files. Using `grep -i -r cpu .` we are able to search
a directory tree for the keyword `cpu`. But while *grep* does a wonderful job, it is kind of slow. So at that
point I switched over to a tool called `ripgrep` which does these searches a lot faster than plain-old grep.
At some point, I found it very tiring to search for nodes; The reason being that I had to search for specific
nodes without knowing their names or locations. This led to the creation of a script which parses a *Device Tree*
while following includes of other *Device Tree* files, allowing me to search for nodes which have, for example, a
certain attribute set. This script is also included in the "patch repository", however, it does not work perfectly.
It finds most of the nodes but not all of them but was sufficient for my searches.
After finally having the basic nodes in my *Device Tree*, I started to port over all of the required nodes
to enable the serial interface on the SoC. This was the next big mistake I made: I tried to do too much
without verifying that the kernel even boots. This was also the point where I learned that the *Device Tree*
by itself doesn't really do anything. It just tells the kernel what the SoC looks like so that the correct
drivers can be loaded and initialized. So I knew that I had to port drivers from the downstream kernel into the
mainline kernel. The kernel identifies the corresponding driver by looking at the data that the drivers
expose.
```
[...]
static struct of_device_id ext_clk_match[] __initdata = {
{ .compatible = "samsung,exynos8890-oscclk", .data = (void *)0, },
};
[...]
```
This is an example from the [clock driver](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/drivers/clk/samsung/clk-exynos8890.c#L122) of the downstream kernel.
When the kernel is processing a node of the *Device Tree* it looks for a driver that exposes the same
compatible attribute. In this case, it would be the *Samsung* clock driver.
So at this point I was wildly copying over driver code into the mainline kernel. As I forgot this during the
porting attempt, I am
mentioning my mistake again: I never thought about the possibility that the kernel would not boot at all.
After having "ported" the driver code for the clock and some other devices I decided to try and boot the
kernel. Having my phone plugged into the serial adapter made my terminal show nothing. So I went into the
*S-Boot* console to poke around. There I tried some commands in the hope that the bootloader would initialize
the hardware for me so that it magically makes the kernel boot and give me serial output. One was especially
interesting at that time: The name made it look like it would test whether the processor can do **SMP** -
**S**ymmetric **M**ulti**p**rocessing; *ARM*'s version of *Intel*'s *Hyper Threading* or *AMD*'s *SMT*.
By continuing to boot, I got some output via the serial interface! It was garbage data, but it was data. This
gave me some hope. However, it was just some data that was pushed by something other than the kernel. I checked
this hypothesis by installing the downstream kernel, issuing the same commands and booting the kernel.
## Back To The Drawing Board
At this point I was kind of frustrated. I knew that this endeavour was going to be difficult, but I immensely
underestimated it.
After taking a break, I went back to my computer with a new tactic: Port as few things as possible, confirm that
it boots and then port the rest. This was inspired by the way the *Galaxy Nexus* was mainlined in
[this](https://postmarketos.org/blog/2019/06/23/two-years/) blog post.
What did I do this time? The first step was a minimal *Device Tree*. No clock nodes. No serial nodes. No
GPIO nodes. Just the CPU, the memory and a *chosen* node. Setting the `CONFIG_PANIC_TIMEOUT`
[option](https://cateee.net/lkddb/web-lkddb/PANIC_TIMEOUT.html) to 5, waiting at least 15 seconds and seeing
no reboot, I was thinking that the phone did boot the mainline kernel. But before getting too excited, as I
kept in mind that it was a hugely difficult endeavour, I asked in *postmarketOS*' mainline Matrix channel whether it could happen that the phone panics and still does not reboot. The answer I got
was that it could, indeed, happen. It seems like the CPU does not know how to shut itself off. On the x86 platform, this
is the task of *ACPI*, while on *ARM* [*PSCI*](https://linux-sunxi.org/PSCI), the **P**ower **S**tate
**C**oordination **I**nterface, is responsible for it. Since the mainline kernel knows about *PSCI*, I wondered
why my phone did not reboot. As the result of some thinking I thought up 3 possibilities:
1. The kernel boots just fine and does not panic. Hence no reboot.
2. The kernel panics and wants to reboot but the *PSCI* implementation in the downstream kernel differs from the mainline code.
3. The kernel just does not boot.
The first possibility I threw out of the window immediately. It was just too easy. As such, I began
investigating the *PSCI* code. Out of curiosity, I looked at the implementation of the `emergency_restart`
function of the kernel and discovered that the function `arm_pm_restart` is used on *arm64*. Looking deeper, I
found out that this function is only set when the *Device Tree* contains a *PSCI* node of a supported version.
The downstream node is compatible with version `0.1`, which does not support the `SYSTEM_RESET` functionality
of *PSCI*. Since I could just turn off or restart the phone when using *Android* or *postmarketOS*, I knew
that there is something that just works around old firmware.
The downstream [*PSCI* node](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/arch/arm64/boot/dts/exynos8890.dtsi#L316) just specifies that it is compatible with `arm,psci`, so
how do I know that it is only firmware version `0.1` and how do I know of this `SYSTEM_RESET`?
If we grep for the compatible attribute `arm,psci` we find it as the value of the `compatible` field in the
source file `arch/arm64/kernel/psci.c`. It [specifies](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/arch/arm64/kernel/psci.c#L381) that the exact attribute of `arm,psci`
results in a call to the function `psci_0_1_init`. This indicates a version of *PSCI*. If we take a look
at *ARM*'s [*PSCI* documentation](http://infocenter.arm.com/help/topic/com.arm.doc.den0022d/Power_State_Coordination_Interface_PDD_v1_1_DEN0022D.pdf)
we find a section called *"Changes in PSCIv0.2 from first proposal"* which contains the information that,
compared to version 0.2, the call `SYSTEM_RESET` was added. Hence we can guess that the *Exynos8890* SoC
comes with firmware which only supports this version 0.1 of *PSCI*.
After a lot of searching, I found a node called `reboot` in the [downstream source](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/arch/arm64/boot/dts/exynos8890.dtsi#L116).
The compatible driver for it is within the [*Samsung* SoC](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/drivers/soc/samsung/exynos-reboot.c) driver code.
Effectively, the way this code reboots the SoC, is by mapping the address of the PMU, which I guess stands for
*Power Management Unit*, into memory and writing some value
to it. This value is probably the command which tells the PMU to reset the SoC.
In my "patchset" *patches_v2* I have ported this code. Testing it with the downstream kernel, it
made the device do something. Although it crashed the kernel, it was enough to debug.
To test the mainline kernel, I added an `emergency_restart` at the beginning of the `start_kernel` function.
The result was that the device did not do anything. The only option I had left was 3; the kernel does not even
boot.
At this point I began investigating the `arch/arm64/` code of the downstream kernel more closely. However, I
noticed something unrelated during a kernel build: The downstream kernel logs something with *FIPS* at the
end of the build. Grepping for it resulted in some code at [the end](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/scripts/link-vmlinux.sh#L253) of the `link-vmlinux.sh` script. I thought
that it was signing the kernel with a key in the repo, but it probably is doing something else. I tested
whether the downstream kernel boots without these crypto scripts and it did.
The only thing I did not test was whether the kernel boots without
["double-checking [the] jopp magic"](https://github.com/ivanmeler/android_kernel_samsung_herolte/blob/lineage-15.1/scripts/link-vmlinux.sh#L270). But by looking at this script, I noticed another interesting thing:
`CONFIG_RELOCATABLE_KERNEL`. By having just a rough idea of what this config option enables, I removed it
from the downstream kernel and tried to boot. But the kernel did not boot. This meant that this option
was required for booting the kernel. This was the only success I can report.
By grepping for this config option I found the file `arch/arm64/kernel/head.S`. I did not know what it was
for so I searched the internet and found a [thread](https://unix.stackexchange.com/questions/139297/what-are-the-two-head-s-files-in-linux-source)
on *StackOverflow* that explained that the file
is prepended onto the kernel and executed before `start_kernel`. I mainly investigated this file, but in
hindsight I should have also looked more at the other occurrences of the `CONFIG_RELOCATABLE_KERNEL` option.
So what I did was try and port over code from the downstream `head.S` into the mainline `head.S`. This is
the point where I am at now. I did not progress any further as I am not used to assembly code or *ARM*
assembly, but I still got some more hypotheses as to why the kernel does not boot.
1. For some reason the CPU never reaches the instruction to jump to `start_kernel`.
2. The CPU fails to initialize the MMU or some other low-level component and thus cannot jump into `start_kernel`.
At the moment, option 2 seems the most likely as the code from the downstream kernel and the mainline kernel
do differ some and I expect that *Samsung* added some code as their MMU might have some quirks that the
mainline kernel does not address. However, I did not have the chance to either confirm or deny any of these
assumptions.
As a bottom line, I can say that the most useful, but in my case most ignored, thing I learned is patience.
During the entire porting process I tried to do as much as I can in the shortest amount of time possible.
However, I quickly realized that I got the best ideas when I was doing something completely different. As
such, I also learned that it is incredibly useful to always have a piece of paper or a text editor handy
to write down any ideas you might have. You never know what might be useful and what not.
I also want to mention that I used the [*Bootlin Elixir Cross Referencer*](https://elixir.bootlin.com/linux/latest/source)
a lot. It is a very useful tool to use when exploring the kernel source tree. However, I would still
recommend to have a local copy so that you can very easily grep through the code and find things that
neither *Github* nor *Elixir* can find.

160
_posts/2019-10-06-Road-to-Foss.md

@ -0,0 +1,160 @@
---
title: Road2FOSS - My Journey to Privacy by Self-Hosting
hashtag: road2foss
---
About one year ago, I made plans to ditch many of the proprietary services that I used
on a daily basis and replace them with FOSS alternatives. Now it is a year later and
while my project is not done, I really did quite a lot.
## History
But why do all this?
The answer consists of three main points, though they are weighed differently:
1. Privacy: The inspiration for this project came from the fact that I did not trust my messaging application back then. It was proprietary and probably collecting all the data it could, thus I wanted to get away from it.
2. Learning: I really enjoy tinkering with computer hardware, software and am quite interested in server administration. Hence, I thought it would be a great learning opportunity for me.
3. Fun: I do enjoy this kind of work, so I thought it would be a fun, but quite major, side project.
I knew that it would be a major undertaking but I still wanted to give it a try.
## Instant Messaging
Judging by the amount of personal data I leak when texting people I know I wanted to switch IM services
as quickly as possible.
At this stage, there were three candidates for me:
- *Signal*
- *Matrix* with Riot
- *Jabber/XMPP*
Originally, *Signal* was my preferred choice since I really liked its interface. But the problem with Signal,
and I do not blame the developers for this one, is that the service only works with a mobile device running
the app. If I wanted to run *Signal* on my computer because, for example, my phone is broken or the battery
is empty, then I just could not since it requires my phone to be online. Also, which I learned only just recently,
*Signal*'s *Android* app has a bug which [drains the phone's battery](https://github.com/signalapp/Signal-Android/issues/8658)
when one does not have *Google services* installed on their phone.
*Matrix* in combination with Riot was another idea of mine. But here the problem was the mobile app. It
seemed to me more like the interface of messengers like *Slack* and *Discord*, which I personally do not like
for mobile Instant Messaging. When I last looked at the entire *Matrix* ecosystem, there was only one
well-working client for mobile, which was Riot. Additionally, the homeserver was difficult to set up; at least much more than
*Prosody*, to which I will come in the next paragraph. Moreover, I read in the [*Disroot blog*](https://web.archive.org/web/20190921180013/https://disroot.org/en/blog/donating_floss) that they have
quite some problems with their *Matrix* homeserver as *"[...] [k]eeping room history and all metadata connected to them forever
is a terrible idea, in our opinion, and not sustainable at all. One year of history is way too much already [...]"*. This
was the end for the idea of self-hosting a *Matrix* server.
*Jabber/XMPP* being something I saw only once way back when browsing a linux forum, I became interested. It
checked all my requirements: It is cross-platform, as it is only a protocol, allows self-hosting with FOSS
software and, the most important factor, includes End-to-End-Encryption using *OMEMO*. I also started to
appreciate federated software solutions, which made *Jabber* the clear winner for me. The *Jabber* clients
that I now use on a daily basis are also very fine pieces of opensource software: *Conversations*' interface
is simple, works without draining my battery and it just works. *Gajim*, after some configuration and tweaking,
works really well, looks clean and simple and I would really love to replace *Discord* on the desktop with
*Gajim*.
Recently, I also started to use *Profanity*, which seems a bit rough around the edges and sometimes does not
work, but maybe I am just doing something wrong.
In terms of server software I initially wanted to go with *ejabberd*. But after seeing its amount of
documentation, I just chose *Prosody*. It is the software that was the least painful to set up with all
requirements for modern messaging being covered by its internal or external modules. It also never crashed;
only when I messed the configuration up with syntax errors.
Since I use *Discord* and it is more difficult to bring people over from there, I went with a compromise
and started to bridge the channels I use the most to a *Jabber MUC* using [*matterbridge*](https://github.com/42wim/matterbridge).
Thus I can use those channels without having to have the *Discord* app installed on my devices.
Another use I got out of *Jabber* is the fact that I can create as many bot accounts on my server as I want. While this
sounds like I use those bots for bad things it is the opposite: I use them to tell me when something is wrong
using *netdata* or for the already mentioned bridge between *Discord* and *Jabber*.
## VoIP
VoIP is something that I use even more than plain Instant Messaging, which is why I wanted to self-host
a FOSS VoIP-solution. The most commonly used one is *Mumble*, which was a run-and-forget experience. Especially
when not using the full server but a smaller one like *umurmur*.
## Code
At first, I used *Github*. But after *Microsoft* bought it, I was a bit sceptical and switched to *Gitlab*, which
worked really well. It was even opensource so I started using it. But after some time, I found that
there are some things that annoy me with *Gitlab*. This includes it automatically enabling "Pipelines" when I
just created a repository even though I never enabled those.
That was when I came across *gogs* and *gitea*; the latter being my current solution. I wanted a simple
software that I can just run and has a somewhat nice interface. Why the nice interface? I want that if people
look at my code that it feels familiar to browse it in the browser. Also, I can invite friends to use it if
they also want to get away from proprietary services and software.
My instance has registrations disabled as I do not have the time to moderate it, but I have seen that federation
of some sorts, in the context of *ForgeFed*, is being discussed on the issue tracker, though you should not quote
me on this one.
*Gitea* was mostly a run-and-forget experience for me and is working very well.
## Personal Information Management
Since I've started to use calendars more, I wanted a solution to sync those across my devices. Before this entire
project I was using *Google*'s own calendar service. Then I started using *Disroot*'s NextCloud to synchronize
calendar data. However, it not being encrypted at rest was a concern for me as my calendar does contain some
events that I would not like an attacker to know as this would put the attacker in a position where sensitive
information can be deduced about me.
After some looking around, I found [*EteSync*](https://github.com/etesync). This software works really great, given that the server is just
a simple django app that stores data and does user management and authentication. The *Android* app, in my case,
does most of the work and works really well. The only problem I had was the fact that *EteSync* has no desktop
client. They provide a web app and a server that bridges between regular DAV and *EteSync* but nothing like
a regular client.
Since I used regular WebDAV services, like the *Disroot* one I mentioned earlier, I have [*vdirsyncer*](https://github.com/pimutils/vdirsyncer)
installed and configured only to find out that they dropped support for *EteSync* in the last version.
Wanting a tool like *vdirsyncer* but for *EteSync* I went to work and created [*etesyncer*](https://git.polynom.me/PapaTutuWawa/etesyncer).
## EMail
Most of my online life I used proprietary EMail-services. Most of that time I used *GMail*. Since I bought a
domain for this project and have a server running, I thought: *"Why not self-host EMail?"*. This is exactly
what I did!
I use the "traditional" combination of *postfix* and *dovecot* to handle incoming, outgoing EMail and IMAP
access. Since I use [*mu4e*](https://web.archive.org/web/20190921054652/http://www.djcbsoftware.nl/code/mu/mu4e.html) in combination with *msmtp* and *mbsync* for working with email, I did not
install a webmail client.
This was the most difficult part to get working as the configuration sometimes worked and sometimes not.
The main culprit here was *DKIM* because it changed the permissions of its files at startup to something else
which made *openDKIM* crash. Now it stopped doing this but I am not sure why.
What made the EMail-server so difficult was also the fact that so much goes into hosting an EMail-server I never
thought about, like *DKIM*, *SPF* or having a *FQDN*.
At this point, it pretty much runs itself. It works, it receives EMails, it sends EMails and it allows
me to view my EMails via IMAP.
Coming from *Protonmail*, the only thing that I am missing is encryption of my EMails. Since not every person
I contact using EMail uses or knows *PGP*, I would like to encrypt incoming EMails. While there are solutions
to do this, they all involve encrypting the EMail after they are put in the queue by *postfix*, which puts
them on disk. Hence, the mail was once written in plaintext. While I would like to avoid this, I have not
found a way of doing this without digging into *postfix*'s code and adding support for this.
## Blog
I wanted a blog for a long time and since I had a spare domain lying around, I decided to create one. While
I could have gone with a solution like *Wordpress* and the like, they were too complicated for my needs.
So I just went with the simplest solution which is using a static site generator: *jekyll* in my case.
This is one of the points where decentralization was a huge factor directly from the start, as this is exactly
what the web was made for, so I was actively avoiding any non-selfhost solutions. While I could have gone with
a federated solution like *write freely*, I chose the static page generator as it was much simpler. And because
I love writing in Markdown.
## Webserver
Since I now use *GPG* to sign any emails that I send, I needed a way of exposing these keys to the public. While
I could have gone with a keyserver, I decided against it. Admittedly, I did not look into self-hosting a
keyserver but this was not my plan. I want to keep everything simple and prevent myself from installing too many
services on my server. This led me to just putting my public keys on the server and pointing my
webserver to them.
Since I run multiple services that are accessible via the browser, I needed the webserver as a reverse proxy,
pointing my different domain names to the correct services. This way, all services can run on their own ports while
the reverse proxy "unifies" them on port 443.
## Conclusion
All in all I am very happy with my setup. It allows me to host my own instances of privacy-respecting software the way I like
to. It gives me something to do and allows me to learn about system administration and different tools like *Docker*
or *Ansible*. So all in all, although the project has no real end, I would say that it was and is a huge success for me.
During the course of this project, I also switched services like my search engine or the software with which I watch videos
but as I do not self-host these, I did not mention them.

105
_posts/2020-01-03-Selfhosting-Lessons.md

@ -0,0 +1,105 @@
---
title: Lessons Learned From Self-Hosting
hashtag: selfhostlessons
---
Roughly eight months ago, according to my hosting provider, I spun up my VM which
I use to this day to self-host my chat, my mail, my git and so on. At the beginning, I thought that
it would allow me both to get away from proprietary software and to learn Linux administration. While
my first goal was met without any problems, the second one I achieved in ways I did not anticipate.
During these eight months, I learned quite a lot. Not by reading documentation, but by messing up
deployments. So this post is my telling of how I messed up and what lessons I learned from it.
# Lesson 1: Document everything
I always tell people that you should document your code. When asked why I answer that you won't
remember what that line does when you have not looked at your codebase for weeks or months.
What I did not realise is that this also applies to administration. I only wrote basic documentation
like a howto for certificate generation or a small troubleshooting guide. This, however, missed the most
important thing to document: the entire infrastructure.
Whenever I needed to look up my port mapping, what did I do? I opened up my *Docker compose* configuration
and search for the port mappings. What did I do when I wanted to know what services I have? Open my
*nginx* configuration and search for `server` directives.
This is a very slow process since I have to remember what services I have behind a reverse proxy and which
ones I have simply exposed. This led me in the end to creating a folder - called `docs` - in which
I document everything. What certificates are used by what and where they are, port mappings, a graph
showing the dependencies of my services, ... While it may be tedious to create at first, it will really
help.
```
[World]
+
|
+-[443]-[nginx]-+-(blog.polynom.me)
+-(git.polynom.me)-[gitea]
```
Above, you can see an excerpt from my *"network graph"*.
# Lesson 2: Version Control everything
Version Control Systems are a great thing. Want to try something out? Branch, try out and then either
merge back or roll back. Want to find out what changes broke something? Diff the last revisions and narrow
down your "search space". Want to know what you did? View the log.
While it might seem unnecessary, it helps me keep my cool, knowing that if I ever mess up my configuration, I
can just roll back the configuration from within git.
# Lesson 3: Have a test environment
While I was out once, I connected to a public Wifi. There, however, I could not connect to my VPN. It simply
did not work. A bit later, my Jabber client *Conversations* told me that it could not find my server. After
some thinking, I came to the conclusion that the provider of said public Wifi was probably blocking port `5222`
*(XMPP Client-to-Server)* and whatever port the VPN is using. As such, I wanted to change the port my
Jabber server uses. Since I do not have a failover server I tried testing things out locally, but gave up
after some time and just went and "tested in production". Needless to say that this was a bad idea. At first,
*Conversations* did not do a DNS lookup to see the changed XMPP port, which lead me to removing the DNS entry.
However, after some time - probably after the DNS change propagated far enough - *Conversations* said that it
could not find the server, even though it was listening on port `5222`. Testing with the new port yielded
success.
This experience was terrible for me. Not only was it possible that I broke my Jabber server, but it would
annoy everyone I got to install a Jabber client to talk to me as it would display *"Cannot connect to..."*.
If I had tested this locally, I probably would have been much calmer. In the end, I nervously watched as everyone
gradually reconnected...
# Lesson 4: Use tools and write scripts
The first server I ever got I provisioned manually. I mean, back then it made sense: It was a one-time provisioning and nothing should
change after the initial deployment. But now that I have a continually evolving server, I somehow need to document every step in case
I ever need to provision the same server again.
In my case it is *Ansible*. In my playbook I keep all the roles, e.g. *nginx*, *matterbridge*, *prosody*, separate and apply them to my one
server. In there I also made **heavy** use of templates. The reason for it is that before I started my [*"Road to FOSS"*](https://blog.polynom.me/Road-to-Foss.html)
I used a different domain that I had lying around. Changing the domain name manually would have been a very tedious process, so I decided to use
templates from the get-go. To make my life easier in case I ever change domains again, I defined all my domain names based on my `domain` variable.
The domain for git is defined as {% raw %}`git.{{ domain }}`{% endraw %}, the blog one as {% raw %}`blog.{{ domain }}`{% endraw %}.
Additionally, I make use of *Ansible Vaults*, allowing me to have encrypted secrets in my playbook.
During another project, I also set up an *Ansible* playbook. There, however, I did not use templates. I templated the configuration files using a Makefile
that was calling `sed` to replace the patterns. Not only was that a fragile method, it was also unneeded as *Ansible* was already providing
this functionality for me. I was just wasting my own time.
What I also learned was that one *Ansible* playbook is not enough. While it is nice to automatically provision a server using *Ansible*, there are other things
that need to be done. Certificates don't rotate themselves. From that, I derived a rule stating that if a task needs to be done more than once, then it is
time to write a script for it.
# Lesson 4.1: Automate
Closely tied to the last point: If a task needs to be performed, then you should consider creating a cronjob, or a systemd timer if that is more your thing,
to automatically run it. You don't want to enjoy your day, only for it to be ruined by an expired certificate causing issues.
Since automated cronjobs can cause trouble as well, I decided to run all automated tasks on days at a time during which I am likely to be able to react. As such, it is very
important to notify yourself of those automated actions. My certificate rotation, for example, sends me an eMail at the end, telling me if the certificates
were successfully rotated and if not, which ones failed. For those cases, I also keep a log of the rotation process somewhere else so that I can review it.
# Lesson 5: Unexpected things happen
After having my shiny server run for some time, I was happy. It was basically running itself. Until *Conversations* was unable to contact my server,
connected to a public Wifi. This is something that I did not anticipate, but happened nevertheless.
This means that my deployment was not a run-and-forget solution but a constantly evolving system, where small improvements are periodically added.
# Conclusion
I thought I would just write down my thoughts on all the things that went wrong over the course of my self-hosting adventure. They may not
be best practices, but things that really helped me a lot.
Was the entire process difficult? At first. Was the experience an opportunity to learn? Absolutely! Was it fun? Definitely.

215
_posts/2020-02-13-Running-Prosody-traefik.md

@ -0,0 +1,215 @@
---
title: Running Prosody on Port 443 Behind traefik
hashtag: prosodytraefik
---
*TL;DR: This post is about running prosody with HTTPS services both on port 443. If you only care about the how, then jump to*
**Considerations** *and read from there.*
# Introduction
As part of my [*"road to FOSS"*](https://blog.polynom.me/Road-to-Foss.html) I
set up my own XMPP server using *prosody*. While it has been running fine for
quite some time, I noticed, while connected to a public Wifi, that my
server was unreachable. At that time I was panicking because I thought prosody
kept crashing for some reason. After using my mobile data, however, I saw
that I **could** connect to my server. The only possible explanation I came
up with is that the provider of the public Wifi is blocking anything that
is not port 53, 80 or 443. *(Other ports I did not try)*
My solution: Move *prosody*'s C2S - *Client to Server* - port from 5222 to
either port 53, 80 or 443. Port 53 did not seem like a good choice as I
want to keep open the possibility of hosting a DNS server myself. So the only
choice was between 80 and 443.
# Considerations
Initially I went with port 80 because it would be the safest bet: You cannot
block port 80 while still allowing customers to access the web. This would
have probably worked out, but I changed it to port 443 later-on. The reason
being that I need port 80 for Let's Encrypt challenges. Since I use nginx
as a reverse proxy for most of my services, I thought that I can multiplex
port 80 between LE and *prosody*. This was not possible with nginx.
So I discovered traefik since it allows such a feat. The only problem is that
it can only route TCP connections based on the
[SNI](https://github.com/containous/traefik/blob/master/docs/content/routing/routers/index.md#rule-1). This requires the
XMPP connection to be encrypted entirely, not after STARTTLS negotiation,
which means that I would have to configure *prosody* to allow such a
connection and not offer STARTTLS.
# Prosody
Prosody's documentation makes no mention of *direct TLS*, which made me
guess that there is no support for it in *prosody*. However, after asking
in the support group, I was told that this feature is called *legacy_ssl*.
As such, one only has to add
```lua
-- [...]
legacy_ssl_ports = { 5223 }
legacy_ssl_ssl = {
[5223] = {
key = "/path/to/keyfile";
certificate = "/path/to/certificate";
}
}
-- [...]
```
*Note:* In my testing, *prosody* would not enable *legacy_ssl* unless I
explicitly set `legacy_ssl_ports`.
When *prosody* tells you that it enabled `legacy_ssl` on the specified
ports, then you can test the connection by using OpenSSL to connect to it:
`openssl s_client -connect your.domain.example:5223`. OpenSSL should tell
you the data it can get from your certificate.
# traefik
In my configuration, I run *prosody* in an internal *Docker* network. In
order to connect it, in my case port 5223, to the world via port 443, I
configured my traefik to distinguish between HTTPS and XMPPS connections
based on the set SNI of the connection.
To do so, I firstly configured the static configuration to have
port 443 as an entrypoint:
```yaml
# [...]
entrypoints:
https:
address: ":443"
# [...]
```
For the dynamic configuration, I add two routers - one for TCP, one for
HTTPS - that both listen on the entrypoint `https`. As the documentation
[says](https://github.com/containous/traefik/blob/master/docs/content/routing/routers/index.md#general-1),
*"If both HTTP routers and TCP routers listen to the same entry points, the TCP routers will apply before the HTTP routers."*. This means that traefik has
to distinguish the two somehow.
We do this by using the `Host` rule for the HTTP router and `HostSNI` for
the TCP router.
As such, the dynamic configuration looks like this:
```yaml
tcp:
routers:
xmpps:
entrypoints:
- "https"
rule: "HostSNI(`xmpps.your.domain.example`)"
service: prosody-dtls
tls:
passthrough: true
# [...]
services:
prosody-dtls:
loadBalancer:
servers:
- address: "<IP>:5223"
http:
routers:
web-secure:
entrypoints:
- "https"
rule: "Host(`web.your.domain.example`)"
service: webserver
```
It is important to note here, that the option `passthrough` has to be `true`
for the TCP router as otherwise the TLS connection would be terminated by
traefik.
Of course, you can instruct prosody to use port 443 directly, but I prefer
to keep it like this so I can easily see which connection goes to where.
# HTTP Upload
HTTP Upload was very simple to implement this way. Just add another HTTPS
route in the dynamic traefik configuration to either the HTTP port of
prosody, which would terminate the TLS connection from traefik onwards, or
the HTTPS port, which - if running traefik and prosody on the same host -
would lead to a possible unnecessary re-encryption of the data.
This means that prosody's configuration looks like this:
```lua
-- [...]
-- Perhaps just one is enough
http_ports = { 5280 }
https_ports = { 5281 }
Component "your.domain"
-- Perhaps just one is required, but I prefer to play it safe
http_external_url = "https://http.xmpp.your.domain"
http_host = "http.xmpp.your.domain"
-- [...]
```
And traefik's like this:
```yaml
[...]
http:
routers:
prosody-https:
entrypoints:
- "https"
rule: "Host(`http.xmpp.your.domain`)"
service: prosody-http
services:
prosody-http:
loadBalancer:
servers:
- "http://prosody-ip:5280"
[...]
```
# DNS
In order for clients to pick this change up, one has to create a DNS SRV
record conforming to [XEP-0368](https://xmpp.org/extensions/xep-0368.html).
This change takes some time until it reaches the clients, so it would be wise
to keep the regular STARTTLS port 5222 open and connected to prosody until
the DNS entry has propagated to all DNS servers.
# Caveats
Of course, there is nothing without some caveats; some do apply here.
This change does not necessarily get applied to all clients automatically.
Clients like *Conversations* and its derivatives, however, do that when they
are reconnecting. Note that there may be clients that do not support XEP-0368
which will not apply this change automatically, like - at least in my
testing - *profanity*.
Also there may be some clients that do not support *direct TLS* and thus
cannot connect to the server. In my case, *matterbridge* was unable to
connect as it, without further investigation, can only connect with either
no TLS or with STARTTLS.
# Conclusion
In my case, I run my *prosody* server like this:
```
<<WORLD>>-------------+
| |
[traefik]-------------/|/--------------+
| | |
{xmpp.your.domain} [5269] {other.your.domain}
[443 -> 5223] | [443 -> 80]
{http.xmpp.your.domain} | |
[443 -> 5280] | |
| | |
[prosody]-------------+ [nginx]
```
As I had a different port for *prosody* initially (80), I had to wait until
the DNS records are no longer cached by other DNS servers or clients. This
meant waiting for the TTL of the record, which in my case was 18000 seconds,
or 5 hours.
The port 5222 is, in my case, not reachable from the outside world but via my
internal *Docker* compose network so that my *matterbridge* bridges still work.

33
about.html

@ -0,0 +1,33 @@
---
layout: default
---
<h3>About "PapaTutuWawa"</h3>
<div class="container">
<img
class="profile-picture"
src="assets/img/profile.jpg"
alt="Profile Picture" />
</div>
<div class="quote">
Student, Anime expert, Vocaloid listener, Docker and Linux fan and hobby SysAdmin.
</div>
<ul>
<li><a href="https://{{ site.social.git }}/">Code</a></li>
<li><a href="https://{{ site.social.blog }}/">Blog</a></li>
<li><a href="https://{{ site.social.mastodon }}/">Mastodon</a></li>
</ul>
<h3>Contact</h3>
<ul>
<li>EMail: <i>{{ site.social.email.address }} ["a" with a weird circle] {{ site.social.email.domain }}</i> (<a href="{{ site.social.email.gpg}}">GPG</a>)</li>
</ul>
<h3>About This Page</h3>
<ul>
<li><a href="https://{{ site.social.git }}/PapaTutuWawa/blog.polynom.me">Source</a></li>
<li>Last updated <i>{{ "now" | date: "%D at %T" }}</i></li>
</ul>

11
archive.html

@ -0,0 +1,11 @@
---
layout: default
---
<h2><center>Archive</center></h2>
<ul>
{% for post in site.posts %}
<li><a href="{{ post.url }}">{{ post.title }}</a></li>
{% endfor %}
</ul>

94
assets/css/index.css

@ -0,0 +1,94 @@
/* Fonts */
@font-face {
font-family: roboto;
src: url(/assets/fonts/Roboto-Regular.ttf)
}
/* Element styling */
html {
background-color: #212121;
color: #ffffff;
font-family: Roboto;
}
footer {
width: 100%;
}
h1, h2, h3 {
margin-bottom: 2px;
}
a {
color: white;
}
ul {
margin-top: 2px;
}
img {
/* Prevent images in blog posts from getting too big */
max-width: 800px;
}
/* Layouting */
.container {
display: flex;
justify-content: center;
width: 100%;
}
.horizontal {
flex-direction: row;
}
.vertical {
display: flex;
flex-direction: column;
}
.subbar-link {
padding: 5px;
}
.post-list-item {
display: block;
max-width: 600px;
}
.post {
display: block;
max-width: 800px;
}
.title-sub {
margin-top: 0px;
margin-bottom: 10px;
}
.title-sub-subbar {
margin-top: -20px;
}
/* Text styling */
.highlight {
background-color: #373737;
padding: 4px;
}
.quote {
border-left: 2px;
border-left-style: solid;
border-color: gray;
padding-left: 4px;
font-style: italic;
}
/* Image styling */
.profile-picture {
width: 256px;
height: 256px;
margin-bottom: 10px;
}

BIN
assets/fonts/Roboto-Regular.ttf

BIN
assets/img/as-long-as-it-works.jpg

Before After
Width: 442  |  Height: 592  |  Size: 36 KiB

BIN
assets/img/blog-umatrix.jpg

Before After
Width: 600  |  Height: 250  |  Size: 19 KiB

BIN
assets/img/profile.jpg

Before After
Width: 500  |  Height: 500  |  Size: 44 KiB

BIN
assets/img/raw/as-long-as-it-works.jpg

Before After
Width: 442  |  Height: 592  |  Size: 36 KiB

BIN
assets/img/raw/blog-umatrix.jpg

Before After
Width: 600  |  Height: 250  |  Size: 19 KiB

BIN
assets/img/raw/profile.jpg

Before After
Width: 500  |  Height: 500  |  Size: 44 KiB

BIN
assets/img/raw/serial-cable.jpg

Before After
Width: 3740  |  Height: 1880  |  Size: 257 KiB

BIN
assets/img/serial-cable.jpg

Before After
Width: 3740  |  Height: 1880  |  Size: 257 KiB

27
atom.xml

@ -0,0 +1,27 @@
---
layout: null
---
{% comment %}
RSS 2.0 feed of the ten most recent posts, rendered by Jekyll (layout: null
means no surrounding HTML). Despite the file name, this is an RSS feed that
only borrows the Atom namespace for the rel="self" link.
{% endcomment %}
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
<title>{{ site.title | xml_escape }}</title>
<description>{{ site.description | xml_escape }}</description>
<link>{{ site.url }}</link>
{% comment %} The self-reference must match the path this file is served from
(atom.xml, not feed.xml), otherwise feed readers resolve the wrong URL. {% endcomment %}
<atom:link href="{{ site.url }}/atom.xml" rel="self" type="application/rss+xml" />
{% comment %} NOTE(review): <author> with <name>/<email>/<uri> children is
Atom's author format, not valid RSS 2.0 (which uses managingEditor); also
site.author is not visible in _config.yml — confirm it is set, or this block
renders empty. Left in place to avoid changing feed output beyond the fix. {% endcomment %}
<author>
<name>{{ site.author.name }}</name>
<email>{{ site.author.email }}</email>
<uri>{{ site.author.url }}</uri>
</author>
{% for post in site.posts limit:10 %}
<item>
<title>{{ post.title | xml_escape }}</title>
<description>{{ post.content | xml_escape }}</description>
<pubDate>{{ post.date | date: "%a, %d %b %Y %H:%M:%S %z" }}</pubDate>
{% comment %} RSS 2.0 allows exactly one <link> per item and it carries the URL
as text content; the previous Atom-style <link href=""/> duplicate was invalid
and broke feed validation, so it was removed. {% endcomment %}
<link>{{ site.url }}{{ post.url }}</link>
<guid isPermaLink="true">{{ site.url }}{{ post.url }}</guid>
</item>
{% endfor %}
</channel>
</rss>

45
index.html

@ -0,0 +1,45 @@
---
layout: default
---
<h2><center>Recent Posts</center></h2>
{% for post in paginator.posts %}
<div class="post-list-item">
<h3><a href="{{ post.url }}">{{ post.title }}</a></h3>
<p><b>&gt; {{ post.date | date_to_string }}</b></p>
<p class="post-summary">
<div class="quote">
{{ post.excerpt }}
</div>
</p>
</div>
{% endfor %}
{% if paginator.total_pages > 1 %}
<div class="container">
<div class="horizontal">
<div class="pagination">
{% if paginator.previous_page == 1 %}
<a href="{{ '/' | prepend: site.baseurl | replace: '//', '/' }}" class="page-item">&laquo;</a>
{% elsif paginator.previous_page%}
<a href="{{ paginator.previous_page_path | prepend: site.baseurl | replace: '//', '/' }}" class="page-item">&laquo;</a>
{% else %}
<span class="page-item">&laquo;</span>
{% endif %} {% for page in (1..paginator.total_pages) %} {% if page == paginator.page %}
<span class="page-item">{{ page }}</span>
{% elsif page == 1 %}
<a href="{{ '/' | prepend: site.baseurl | replace: '//', '/' }}" class="page-item">{{ page }}</a>
{% else %}
<a href="{{ site.paginate_path | prepend: site.baseurl | replace: '//', '/' | replace: ':num', page }}" class="page-item">{{ page }}</a>
{% endif %} {% endfor %} {% if paginator.next_page %}
<a href="{{ paginator.next_page_path | prepend: site.baseurl | replace: '//', '/' }}" class="page-item">&raquo;</a>
{% else %}
<span class="page-item">&raquo;</span>
{% endif %}
</div>
</div>
</div>
{% endif %}

2
robots.txt

@ -0,0 +1,2 @@
User-agent: *
Disallow: /about.html
Loading…
Cancel
Save