Compare commits

...

193 Commits

Author SHA1 Message Date
kim (grufwub) 60295a28bb use updated error library, remove gopher string_const.go
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 1dce8a6600 update errors library in module files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 1598f101f1 use updated errors library
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 0ecc21c1de Update errors library
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) d84739d018 Fix core.EscapePath() reserved chars list, add test for EscapePath, add benchmarks core/url path escaping
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) d6df45cd41 pathBuilder improvements
- allocate starts slice with >0 capacity to minimize allocations

- increase initial buffer allocation size to 64

Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
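A minimal Go sketch of the allocation tweak this commit describes: give the segment-offset slice a non-zero starting capacity and start the byte buffer at 64 bytes so short paths never force a regrowth. The type and field names here are hypothetical, not copied from gophi's source.

```go
package core

// pathBuilder is a hypothetical sketch (not the actual gophi type) of the
// allocation strategy in the commit above: the offsets slice starts with
// capacity > 0 and the byte buffer starts at 64 bytes, so typical short
// request paths never trigger a regrowth.
type pathBuilder struct {
	starts []int  // offsets where each joined segment begins
	buf    []byte // the joined path bytes
}

// newPathBuilder allocates a builder with the capacities described above.
func newPathBuilder() *pathBuilder {
	return &pathBuilder{
		starts: make([]int, 0, 8),   // >0 capacity: most requests have few segments
		buf:    make([]byte, 0, 64), // 64-byte initial buffer
	}
}
```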
kim (grufwub) e4187eb229 update build script to write a version file
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) ffa42c0183 bump version to release v3.1.8
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) fd3bf21758 remove unused const
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 627b83c395 move valid root check
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 088730d3cb update example config
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) e0f402cfd9 fix path joining logic
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) cdd55c0f60 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) f2f0b6d615 improve code commenting, add root dir sanitization, improve root + chroot logic handling
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 2d1b5a9ab3 version bump; for user dir, look up their system user dir instead of assuming '/home/...'
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 1d05e31dfb improve pathBuilder commenting, reduce repeated code
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) f7e43d4d10 bump version
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 69d608d5f5 add comments
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) f07e9c05e9 formatting changes, improve core.ReadFile command
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 14abeb7077 fix new line handling for '\n' at position 0 in read count
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 369e8fef13 take into account empty reads, and error on too many
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
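The guard this commit describes usually looks something like the sketch below: count consecutive zero-byte reads and bail out rather than spinning. The error name and the limit of 3 are assumptions chosen only for illustration.

```go
package core

import (
	"errors"
	"io"
)

// errEmptyReads is a hypothetical error name; the limit of 3 consecutive
// empty reads below is also an assumption, purely illustrative.
var errEmptyReads = errors.New("too many empty reads")

// readInto fills buf from r, tolerating a bounded number of zero-byte
// reads before giving up instead of looping forever.
func readInto(r io.Reader, buf []byte) (int, error) {
	total, empty := 0, 0
	for total < len(buf) {
		n, err := r.Read(buf[total:])
		total += n
		if err == io.EOF {
			return total, nil
		}
		if err != nil {
			return total, err
		}
		if n == 0 {
			empty++
			if empty > 3 {
				return total, errEmptyReads
			}
		} else {
			empty = 0
		}
	}
	return total, nil
}
```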
kim (grufwub) f3cc83a2a3 small fix to previous changes...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 563138f3ec don't bother request read buffering
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 62c005d417 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) f2c3dd60b7 improve conn reading
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) d6bf0f44a6 improve error logging
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 3e182207e0 redirect on empty path, small formatting changes, version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) b2ed836f63 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 6f4714e144 improve code commenting
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 03e7384159 update autogenerated mime types, add mime type generation script
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 39aac0eac8 fix mime type checking, write our own file extension fetcher
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
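A hand-rolled extension fetcher of the kind this commit mentions can be as small as the sketch below (not gophi's actual code): walk backwards to the last '.', and stop at a path separator so dotted directory names don't yield a bogus extension.

```go
package core

// fileExt is an illustrative sketch of a minimal file extension fetcher:
// scan from the end for the last '.', returning "" if a '/' is hit first.
func fileExt(path string) string {
	for i := len(path) - 1; i >= 0; i-- {
		switch path[i] {
		case '.':
			return path[i+1:]
		case '/':
			return ""
		}
	}
	return ""
}
```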
kim (grufwub) ee92133c47 improve mime typing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 5b1f31ca92 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 72ea175925 use hardcoded list of mime types...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) dccf4bd636 remove old library versions from go module files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) bff3b23b49 update go-bufpools version
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) aaddfe3679 update go-logger again...

Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 692d439180 update go-logger version
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 259d01fdd6 update go-filecache versions
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 0ba05dc956 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 37e257fcc8 update go-config version
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 13227bbd21 use new version of go-errors (much improved syntax, *hopefully* more performant)
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) d43597b2fa version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) db73c33fa2 no max CGI runtime
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 5d44d9c0e0 upgrade deps
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 40a5e6e70e further improve initial CGI path stripping
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 27366e224e fix CGI dir request panic
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) ea83ab3f78 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) eef30c742f improved request line-end check
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 41f0ab6b1c fix policy file formatting, be lenient on line ends again...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 81ff43ab29 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) f155e76661 update policy file
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 230a979993 actually you know what, fuck you, be LESS lenient with request line end. SPEC COMPLIANCE FTW
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) c95d2e8939 be a bit more lenient on line-endings
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 352cebcf50 fix version printing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) c842ae78ad improve string constant names and log printing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 8a541b1369 update README format
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
3 years ago
kim (grufwub) 5aeb201cc0 further explain chroot reasonings in docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: b1109ab22d6d25912c2bce8e9cdc4a4ac6bfab49
3 years ago
kim (grufwub) b2cbcc32f3 update docs + README
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: eef14c7da280da725611d07a5ae9d2f4fe0018d0
3 years ago
kim (grufwub) 2e97b28ce5 remove build files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: ce029c62531c22130f80ba329fb3e4343db4bc2f
3 years ago
kim (grufwub) 1d17310377 update gitignore
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: b2ba8a6d0b75d91a49b0fda1c3029c66f8370233
3 years ago
kim (grufwub) e3aee14034 add updated builds based on 3.1.7
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: ba526b1ea747c4af12cde9413834ca843669720c
3 years ago
kim (grufwub) 99b9d041fc update CGI docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 6e5f0786a69020a81f1cb0daf23cad1874546ea5
3 years ago
kim (grufwub) 239e7dd07d small CGI var rename
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: e0adca714c1035567e1047a75f7d7b3ee2622f5f
3 years ago
kim (grufwub) c9436be644 drop beta tag before merge to master
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: ca63b4843c5468796dad464f4309196dccf05556
3 years ago
kim (grufwub) 97f30066a0 update binary releases
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 78c134a1a64c0b28a3297c5d5e8b01e104fdc0e9
3 years ago
kim (grufwub) bdc0c427de remove unused global var
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: c02ced98c7e01c4876248a1dac3cb3a53c5b6d8a
3 years ago
kim (grufwub) f5be927a26 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 3f5406a7722642cdbd7564b5622a40d3d0efd7d7
3 years ago
kim (grufwub) 29a8030fec initial support for PATH_INFO in CGI scripts
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: f7edadb691461b598d1f9693ac180bce99a64fdb
3 years ago
kim (grufwub) 6eb1f5cde7 add note about sslv3 being deprecated
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 2a8fd55be0c4429b0b000ced74f1321bc9100c23
3 years ago
kim (grufwub) 3385c71f2f move note about chroots to docs/MISC.md
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 8c1a294611841c8c5e5ba027f8cd9dff6876006e
3 years ago
kim (grufwub) 4b580976c8 update compiled binaries
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 23e9ea83111658f5373f93a4a01ac856aa85112a
4 years ago
kim (grufwub) f919fac0a0 update build script to fix building
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: a0195439d5fc7d8d6d92909a4b72d5e15e03532a
4 years ago
kim (grufwub) df5f14c440 update CGI docs with proto specific vars
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 3df1b10f81b37621bb3bb3a76b0288f3e5690b30
4 years ago
kim (grufwub) 32aebac22a use our own path cleaner (not path.Clean()) which allows buffer pooling, version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: ea35c82dc1151d7951964444590f771e5c822706
4 years ago
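The idea behind "allows buffer pooling" in this commit is that a custom cleaner can write into a reusable byte buffer instead of allocating a new one per call, the way path.Clean does internally. The sketch below is deliberately simplified (no leading '/', no rooted ".." handling) and is not gophi's actual implementation.

```go
package core

import (
	"strings"
	"sync"
)

// bufPool illustrates the buffer pooling the commit above refers to.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 64) },
}

// cleanPath resolves "." and ".." segments into a pooled buffer so that
// repeated calls avoid a fresh allocation per request. Simplified sketch only.
func cleanPath(p string) string {
	buf := bufPool.Get().([]byte)[:0]
	starts := make([]int, 0, 8) // offset where each kept segment (and its '/') begins
	for _, seg := range strings.Split(p, "/") {
		switch seg {
		case "", ".":
			// drop empty and current-dir segments
		case "..":
			if n := len(starts); n > 0 {
				buf = buf[:starts[n-1]]
				starts = starts[:n-1]
			}
		default:
			starts = append(starts, len(buf))
			if len(buf) > 0 {
				buf = append(buf, '/')
			}
			buf = append(buf, seg...)
		}
	}
	out := string(buf)
	bufPool.Put(buf)
	return out
}
```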
kim (grufwub) ce73853fd0 improve CGI exit code fetching
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: d5dc759452e5be776500e265c8f466465c285ec3
4 years ago
kim (grufwub) 21dabb5050 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: dce2b07715af4ca200e9dcb08c389471dd32f32c
4 years ago
kim (grufwub) 1d37e4461d add todos to README
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: dea94deebdfc82ca16a93c1e05631bee3280060e
4 years ago
kim (grufwub) afbcaa4a1c improve Client struct ip + port handling (only gen ip string once)
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 0b0fcb126915454146b3202e5327bcb605fab8fa
4 years ago
kim (grufwub) 4066ff11f9 add support for protocol specific CGI environments
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 3a465a080010d59e4679f7cbbc128c71d4f7b993
4 years ago
kim (grufwub) 30bf8d5b80 make build/ directory if necessary
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 16a7951350aef928ed0c568c7979dc3b075326c9
4 years ago
kim (grufwub) 8c207d1cab consolidate scripts to build.sh, move out of scripts/
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 5bdf3098f154bd8e7869d54242561a25706edacf
4 years ago
kim (grufwub) aa21e807c4 add binary build files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>

Former-commit-id: 0cbccd570bee8301cbab5464ed7059671a608db1
4 years ago
kim (grufwub) 6064d8124b see previous commit
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 4553cb6897 update gitignore to allow binary build files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) eab3e38dd7 add note about binary release location
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 551d99578b move scripts to scripts/
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 06b50ff573 improved build scripts
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) df2bc06360 add TLS certs + TOML configs to gitignore
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) ee67c63219 add gophi protocol to version print
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 1f8e62e0b2 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 67f249c242 append '/' to end of dirs in directory listings
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) b14b3302b3 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0e558ff28f perform more path cleaning, use our own path joining func (is faster)
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 45f9f92a66 CGI fixes
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) aedd30eb81 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 91b3f12c20 add generate_cert.sh script
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 69b2fed568 update example config
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 3c72f14ab6 update go-errors
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) b7625eef64 better conform to URL standards (escape host part too!), do our own escaping, allow user CGI dirs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 1a141c5fe4 add note about chrooting
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 7fcf5824bc actually make the binary releases a link...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) c5a7d3d301 add binary release link to readme
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) b2cd388b56 make latest Go version check conditional on being compiled for linux
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e0473efb9d add future plans to readme
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) d899881c88 conform more to gemini request standards, improve max request size and request reading
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e0742bbb1b update README
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 4e7e28dba7 add to features
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 2675a14351 add support for privilege dropping and chroots back!
note: check added to ensure it isn't compiled with a Go
version without fixed syscalls

Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
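The Go-version check noted in this commit exists because, at the time, stock Go on Linux could not apply setuid/setgid across all runtime threads, which is also why the README points at a patched toolchain. A minimal sketch of the chroot and privilege-drop sequence itself (assuming the process starts as root; this is not gophi's actual code):

```go
//go:build linux

package core

import (
	"fmt"
	"syscall"
)

// dropPrivileges chroots into dir and switches to the given uid/gid.
// Order matters: chroot and chdir while still root, set the group before
// the user, and abort if any step fails.
func dropPrivileges(dir string, uid, gid int) error {
	if err := syscall.Chroot(dir); err != nil {
		return fmt.Errorf("chroot %s: %w", dir, err)
	}
	if err := syscall.Chdir("/"); err != nil {
		return fmt.Errorf("chdir: %w", err)
	}
	if err := syscall.Setgid(gid); err != nil {
		return fmt.Errorf("setgid %d: %w", gid, err)
	}
	if err := syscall.Setuid(uid); err != nil {
		return fmt.Errorf("setuid %d: %w", uid, err)
	}
	return nil
}
```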
kim (grufwub) 214365ee2c version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) d1d9ead618 shuffle
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 65e1b1a879 update docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) a5d6fa361d update example config
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) de07f8825f small fixes
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 7450bc417f version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0bdca86a83 move example.toml to docs/
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 1cce82a480 switch to using config file, no flags. remove fwd port
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 5d4b97c607 update README, fix gemini errors
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) be23439933 same again
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f2e6381c3a ACTUALLY fix gemini dir listing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0f559836ee fix gemini dir listing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0f83dcb9c7 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 265007046b fix gemini dir listings
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 1335a51024 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 9816b6c717 add gemini to README
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) aa2c0ee14d stop generating robots.txt, always generate caps.txt
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) ba2bc9f033 User dir is now not customizable, improve gemini request parsing
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 797ad97e1c remove unused error
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) d201f98573 add gemini build script
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) ca81ea0e6e add initial support for gemini, version bump to v3.0.0-alpha
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 4401fa0499 remove github specific git features
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e3e8acd163 add note on index search to docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) c31b04c745 finish switching params->query
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) ad0545b49e switch to use 'query' correctly instead of params when talking about the request, url etc
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e011e71923 fix adding index search to params...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 7733b88594 actually use the query variable...
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) eb89e4044f remove old SCGI docs, add better handling of index search + Gopher+ requests
NOTE: even if we don't explicitly support index search / Gopher+

Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 41ad240e26 add back entry to directory listings
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f8c9c738ab improve dir reading
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) a6108587cd improve file mode checking
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f689409692 fix regex
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) eafdc58d40 add support for following symlinks
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) ae3b8f1f4b version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f9c81b5ffd fix formatting of typeInfoNotState + image lines
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f8718ad1b6 slight terminology change
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 4c52f96af6 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 8aef4f13a2 update conn timeout defaults
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 8242b4eb35 fix item type matching, bump version
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) a47f958499 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 07b5ea1d7c remove SCGI support (i have opinions...)
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) d473872bd0 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) af23ed865a small fix..
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 781087c9ba version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 75fb6be3f2 make client return statement more readable
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 55c34db7b7 rename deadlines to timeouts
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) a5c6e1080c add SCGI deadlines, improve code commenting + organization
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 08101e5da5 better explain SCGI support
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) d1150ad6a0 add SCGI to list of features
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) b7879a105a version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 6e6115ae54 add SCGI compliance docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) c1c15b4266 improve SCGI socket fetching from request
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 145f54b85d add initial support for SCGI
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e36f1ecdb9 update docs with the CGI env vars we set
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 69dc4b66c6 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 6502922a4f fix panic
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 56e5eff136 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) be8a92edfb update to work with latest go-filecache
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 3ffba65c76 variable name changes
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 2dd51728a4 minor consistency changes
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0c68a84861 use updated (fixed) errors and filecache libraries
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 748622c8e1 use updated errors library
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0981f37f54 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 7ea9a90f97 better error handling, better filesystem, use own external libraries
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) fbdd422493 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f91493663b use more efficient item type parsing for files
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) c7a3f33660 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 69df7e257a compile gophermap regex in variable definition, no need for function
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 0f73b3255d remove unnecessary CGIStatus error codes
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 3a47c21aec remove http compat from docs
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 89a3ab743d remove unnecessary http strip writer
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) aef3d9f269 remove HTTP compatibility mode for CGI scripts
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 5248e27528 remove unnecessary function
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 47aeb49681 switch back to using 'bind-addr' flag
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) e4737ae616 further improve request full string
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 216d2df070 remember to add leading '/' to full request string!
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) f23b452cb9 version bump
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 549aa31158 improve gopher request logging
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 52a75eacec make SplitBy function public
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 55b5bd86a6 use grufwub/go-logger library
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 7cb9a0045b remove the SCRAP.txt file
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 8477da7698 correct more 'gophor' references
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) b238b45b20 update docs to reference new name
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago
kim (grufwub) 433de3eeab use external go-bufpools library
Signed-off-by: kim (grufwub) <grufwub@gmail.com>
4 years ago

@@ -1,3 +0,0 @@
# These are supported funding model platforms
liberapay: grufwub

.gitignore (vendored): 8 changes

@@ -1,4 +1,8 @@
gophi.*
*.log
*.old
build*/
*.crt
*.key
*.toml
build/
gophi.gemini
gophi.gopher

@@ -1,25 +1,31 @@
# Gophi
A server with Gopher (& hopefully soon, Gemini) protocol support in GoLang!
A server with Gopher & Gemini protocol support in GoLang!
Unix (like) OSes only. Cross-compiled to way too many architectures.
Build-script now much improved, but still not pretty...
I work on open-source projects like this and many others for
free. If you would like to help support my work that would be hugely
appreciated 💕 https://liberapay.com/grufwub/
appreciated 💕 [donate here](https://liberapay.com/grufwub/)
WARNING: the (currently non-extant) development branch is filled with lava,
fear and capitalism.
Binary releases at:
- `gemini://iim.gay/files/gophi/`
- `gopher://iim.gay/1/files/gophi/`
For now, Gophi compilation is reliant on a version of Go that has latest POSIX
syscall fixes cherry picked, e.g. [my fork](https://github.com/grufwub/go/tree/go1.15.5-posix-syscall-fix)
# Features
- Built with security, concurrency and efficiency in mind
- ZERO external dependencies
- LRU file caching with user-controlled cache size, max cached file size,
refresh frequency and max file age before being marked as stale
- LRU file caching with user-controlled cache size, max cached file size
and cache refresh frequency
- Buffer pools to minimize allocations
- CGI/1.1 support
@@ -33,12 +39,34 @@ WARNING: the (currently non-extant) development branch is filled with lava,
- Separate system and access logging
- Chroot and dropping permissions to provided user+group (see [extra notes](docs/MISC.md)
for why you may want to do this over manually executing binary in a chroot)
- Only 1 external dependency, `go-toml` brought in by my own `go-config`
library
# Further Docs
[Usage](docs/USAGE.md)
[Example Configuration](docs/example.toml)
[CGI compliance](docs/CGI.md)
[Gopher specific information](docs/GOPHER.md)
[Gemini specific information](docs/GEMINI.md)
[Extra notes](docs/MISC.md)
# Future plans
- improve the build scripts...
- gemini vhosts
- chroot setup script
- Gemini enforce TLS client validity / certificate zones
- SCGI

@@ -1,2 +0,0 @@
- preallocate slices
- use sync.Pool ? (for byte buffers?)

@@ -1,162 +0,0 @@
#!/bin/sh
set -e
PROJECT='gophi.gopher'
VERSION="$(cat 'core/server.go' | grep -E '^\s*Version' | sed -e 's|\s*Version = \"||' -e 's|\"\s*$||')"
LOGFILE='build.log'
OUTDIR="build-gopher-${VERSION}"
upx_compress() {
local level="$1" filename="$2" topack="${2}.topack"
cp "$filename" "$topack"
if (upx "$level" "$topack" >> "$LOGFILE" 2>&1); then
if (upx --test "$topack"); then
mv "$topack" "$filename"
return 0
else
rm "$topack"
return 1
fi
else
rm "$topack"
return 1
fi
}
compress() {
local filename="$1"
echo "Attempting to compress ${filename}..."
if (upx_compress '--ultra-brute' "$filename"); then
echo "Compressed with --ultra-brute!"
elif (upx_compress '--best' "$filename"); then
echo "Compressed with --best!"
elif (upx_compress '' "$filename"); then
echo "Compressed with no flags."
else
echo "Compression failed!"
fi
}
build_for() {
local archname="$1" toolchain="$2" os="$3" arch="$4"
shift 4
if [ "$arch" = 'arm' ]; then
local armversion="$1"
shift 1
fi
echo "Building for ${os} ${archname} with ${toolchain}..."
local filename="${OUTDIR}/${PROJECT}_${os}_${archname}"
CC="${toolchain}-gcc" CGO_ENABLED=0 GOOS="$os" GOARCH="$arch" GOARM="$armversion" go build -trimpath -o "$filename" "$@" 'cmd/gopher/main.go' >> "$LOGFILE" 2>&1
if [ "$?" -ne 0 ]; then
echo "Failed!"
return 1
fi
compress "$filename"
}
echo "PLEASE BE WARNED THIS SCRIPT IS WRITTEN FOR A VOID LINUX (MUSL) BUILD ENVIRONMENT"
echo "YOUR CC TOOLCHAIN LOCATIONS MAY DIFFER"
echo "IF THE SCRIPT FAILS, CHECK THE OUTPUT OF: ${LOGFILE}"
echo ""
# Clean logfile
rm -f "$LOGFILE"
# Clean and recreate directory
rm -rf "$OUTDIR"
mkdir -p "$OUTDIR"
# Build time :)
# Linux
build_for '386' 'i686-linux-musl' 'linux' '386' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'amd64' 'x86_64-linux-musl' 'linux' 'amd64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5' 'arm-linux-musleabi' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5hf' 'arm-linux-musleabihf' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6' 'arm-linux-musleabi' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6hf' 'arm-linux-musleabihf' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv7lhf' 'armv7l-linux-musleabihf' 'linux' 'arm' '7' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'arm64' 'aarch64-linux-musl' 'linux' 'arm64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'mips' 'mips-linux-musl' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'mipshf' 'mips-linux-muslhf' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'mipsle' 'mipsel-linux-musl' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'mipslehf' 'mipsel-linux-muslhf' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'ppc64le' 'powerpc64le-linux-musl' 'linux' 'ppc64le' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Netbsd
build_for '386' 'i686-linux-musl' 'netbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'amd64' 'x86_64-linux-musl' 'netbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5' 'arm-linux-musleabi' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6' 'arm-linux-musleabi' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv7lhf' 'armv7l-linux-musleabihf' 'netbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'arm64' 'aarch64-linux-musl' 'netbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Openbsd
build_for '386' 'i686-linux-musl' 'openbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'amd64' 'x86_64-linux-musl' 'openbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5' 'arm-linux-musleabi' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6' 'arm-linux-musleabi' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv7lhf' 'armv7l-linux-musleabihf' 'openbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'arm64' 'aarch64-linux-musl' 'openbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Freebsd
build_for '386' 'i686-linux-musl' 'freebsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'amd64' 'x86_64-linux-musl' 'freebsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5' 'arm-linux-musleabi' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv5hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6' 'arm-linux-musleabi' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv6hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'armv7lhf' 'armv7l-linux-musleabihf' 'freebsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'arm64' 'aarch64-linux-musl' 'freebsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Dragonfly
build_for 'amd64' 'x86_64-linux-musl' 'dragonfly' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Macos
build_for '386' 'i686-linux-musl' 'darwin' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'amd64' 'x86_64-linux-musl' 'darwin' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'

@@ -1,3 +0,0 @@
#!/bin/sh
CC='x86_64-linux-musl-gcc' CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -trimpath -buildmode 'pie' -a -v -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"' -o 'gophi.gopher' 'cmd/gopher/main.go'

@@ -0,0 +1,173 @@
#!/bin/sh
VERSION="$(cat 'core/server.go' | grep -E '^\s*Version' | sed -e 's|\s*Version = \"||' -e 's|\"\s*$||')" \
|| {
echo 'Failed to get gophi version!'
exit 1
}
GOROOT="${HOME}/Projects/github.com/grufwub/go"
PATH="${GOROOT}/bin:${PATH}"
build_for() {
# Grab build information
local protocol="$1" archname="$2" toolchain="$3" os="$4" arch="$5"
shift 5
if [ "$arch" = 'arm' ]; then
local armversion="$1"
shift 1
fi
# Generate unique filename
local filename="build/gophi.${protocol}_${os}_${archname}"
# Build binary!
echo "Building for ${os} ${archname} with ${toolchain}..."
CC="${toolchain}-gcc" CGO_ENABLED=0 GOOS="$os" GOARCH="$arch" GOARM="$armversion" go build -trimpath -o "$filename" "$@" "cmd/${protocol}/main.go" \
|| {
echo 'Failed!'
return 1
}
}
echo "PLEASE BE WARNED THIS SCRIPT IS WRITTEN FOR A VOID LINUX (MUSL) BUILD ENVIRONMENT"
echo "YOUR CC TOOLCHAIN LOCATIONS MAY DIFFER"
echo "IF THE SCRIPT FAILS, CHECK THE OUTPUT OF: ${LOGFILE}"
echo ""
# Clean build directory
rm -rf build/; mkdir -p build/
# Write the version
echo "$VERSION" > 'build/version.txt'
# Build time :)
# Linux
build_for 'gopher' '386' 'i686-linux-musl' 'linux' '386' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' '386' 'i686-linux-musl' 'linux' '386' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'linux' 'amd64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'linux' 'amd64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5' 'arm-linux-musleabi' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5' 'arm-linux-musleabi' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5hf' 'arm-linux-musleabihf' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5hf' 'arm-linux-musleabihf' 'linux' 'arm' '5' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6' 'arm-linux-musleabi' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6' 'arm-linux-musleabi' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6hf' 'arm-linux-musleabihf' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6hf' 'arm-linux-musleabihf' 'linux' 'arm' '6' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv7lhf' 'armv7l-linux-musleabihf' 'linux' 'arm' '7' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv7lhf' 'armv7l-linux-musleabihf' 'linux' 'arm' '7' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'arm64' 'aarch64-linux-musl' 'linux' 'arm64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'arm64' 'aarch64-linux-musl' 'linux' 'arm64' -buildmode 'pie' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'mips' 'mips-linux-musl' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'mips' 'mips-linux-musl' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'mipshf' 'mips-linux-muslhf' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'mipshf' 'mips-linux-muslhf' 'linux' 'mips' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'mipsle' 'mipsel-linux-musl' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'mipsle' 'mipsel-linux-musl' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'mipslehf' 'mipsel-linux-muslhf' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'mipslehf' 'mipsel-linux-muslhf' 'linux' 'mipsle' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'ppc64' 'powerpc64-linux-musl' 'linux' 'ppc64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'ppc64' 'powerpc64-linux-musl' 'linux' 'ppc64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'ppc64le' 'powerpc64le-linux-musl' 'linux' 'ppc64le' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'ppc64le' 'powerpc64le-linux-musl' 'linux' 'ppc64le' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Netbsd
build_for 'gopher' '386' 'i686-linux-musl' 'netbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' '386' 'i686-linux-musl' 'netbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'netbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'netbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5' 'arm-linux-musleabi' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5' 'arm-linux-musleabi' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6' 'arm-linux-musleabi' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6' 'arm-linux-musleabi' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6hf' 'arm-linux-musleabihf' 'netbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv7lhf' 'armv7l-linux-musleabihf' 'netbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv7lhf' 'armv7l-linux-musleabihf' 'netbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'arm64' 'aarch64-linux-musl' 'netbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'arm64' 'aarch64-linux-musl' 'netbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Openbsd
build_for 'gopher' '386' 'i686-linux-musl' 'openbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' '386' 'i686-linux-musl' 'openbsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'openbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'openbsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5' 'arm-linux-musleabi' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5' 'arm-linux-musleabi' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6' 'arm-linux-musleabi' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6' 'arm-linux-musleabi' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6hf' 'arm-linux-musleabihf' 'openbsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv7lhf' 'armv7l-linux-musleabihf' 'openbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv7lhf' 'armv7l-linux-musleabihf' 'openbsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'arm64' 'aarch64-linux-musl' 'openbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'arm64' 'aarch64-linux-musl' 'openbsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Freebsd
build_for 'gopher' '386' 'i686-linux-musl' 'freebsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' '386' 'i686-linux-musl' 'freebsd' '386' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'freebsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'freebsd' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5' 'arm-linux-musleabi' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5' 'arm-linux-musleabi' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv5hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv5hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '5' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6' 'arm-linux-musleabi' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6' 'arm-linux-musleabi' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv6hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv6hf' 'arm-linux-musleabihf' 'freebsd' 'arm' '6' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'armv7lhf' 'armv7l-linux-musleabihf' 'freebsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'armv7lhf' 'armv7l-linux-musleabihf' 'freebsd' 'arm' '7' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'arm64' 'aarch64-linux-musl' 'freebsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'arm64' 'aarch64-linux-musl' 'freebsd' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Dragonfly
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'dragonfly' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'dragonfly' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
# Macos
build_for 'gopher' 'amd64' 'x86_64-linux-musl' 'darwin' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'amd64' 'x86_64-linux-musl' 'darwin' 'amd64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gopher' 'arm64' 'aarch64-linux-musl' 'darwin' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'
build_for 'gemini' 'arm64' 'aarch64-linux-musl' 'darwin' 'arm64' -buildmode 'default' -a -tags 'netgo osusergo static_build' -ldflags '-s -w -extldflags "-static"'

@@ -0,0 +1,9 @@
package main
import (
"gophi/gemini"
)
func main() {
gemini.Run()
}
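
The build script above also compiles a gopher entrypoint from cmd/gopher/main.go. Assuming it mirrors the gemini file just shown, with a gopher package exposing the same Run() entry point, it would look like the following sketch:

```go
package main

import (
	"gophi/gopher"
)

func main() {
	gopher.Run()
}
```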

@@ -1,77 +0,0 @@
package core
import "container/list"
// element wraps a map key and value
type element struct {
key string
value *file
}
// lruCacheMap is a fixed-size LRU hash map
type lruCacheMap struct {
hashMap map[string]*list.Element
list *list.List
size int
}
// newLRUCacheMap returns a new LRUCacheMap of specified size
func newLRUCacheMap(size int) *lruCacheMap {
return &lruCacheMap{
// size+1 to account for moment during put after adding new value but before old value is purged
make(map[string]*list.Element, size+1),
&list.List{},
size,
}
}
// Get returns file from LRUCacheMap for key
func (lru *lruCacheMap) Get(key string) (*file, bool) {
lElem, ok := lru.hashMap[key]
if !ok {
return nil, ok
}
// Move element to front of the list
lru.list.MoveToFront(lElem)
// Get Element and return *File value from it
element, _ := lElem.Value.(*element)
return element.value, ok
}
// Put file in LRUCacheMap at key
func (lru *lruCacheMap) Put(key string, value *file) {
lElem := lru.list.PushFront(&element{key, value})
lru.hashMap[key] = lElem
if lru.list.Len() > lru.size {
// Get element at back of list and Element from it
lElem = lru.list.Back()
element, _ := lElem.Value.(*element)
// Delete entry in hashMap with key from Element, and from list
delete(lru.hashMap, element.key)
lru.list.Remove(lElem)
}
}
// Remove file in LRUCacheMap with key
func (lru *lruCacheMap) Remove(key string) {
lElem, ok := lru.hashMap[key]
if !ok {
return
}
// Delete entry in hashMap and list
delete(lru.hashMap, key)
lru.list.Remove(lElem)
}
// Iterate performs an iteration over all key:value pairs in LRUCacheMap with supplied function
func (lru *lruCacheMap) Iterate(iterator func(key string, value *file)) {
for key := range lru.hashMap {
element := lru.hashMap[key].Value.(*element)
iterator(element.key, element.value)
}
}

@@ -1,94 +1,107 @@
package core
import (
"bytes"
"io"
"os/exec"
"strings"
"syscall"
"time"
)
var (
protocol string
// cgiEnv holds the global slice of constant CGI environment variables
cgiEnv []string
// maxCGIRunTime specifies the maximum time a CGI script can run for
maxCGIRunTime time.Duration
// httpPrefixBufSize specifies size of the buffer to use when skipping HTTP headers
httpPrefixBufSize int
// ExecuteCGIScript is a pointer to the currently set CGI execution function
ExecuteCGIScript func(*Client, *Request) Error
"github.com/grufwub/go-errors"
)
// setupInitialCGIEnv takes a safe PATH, uses other server variables and returns a slice of constant CGI environment variables
func setupInitialCGIEnv(safePath string) []string {
env := make([]string, 0)
SystemLog.Info("CGI safe path: %s", safePath)
// Append the default environment
env := make([]string, 12)
env = append(env, "GATEWAY_INTERFACE=CGI/1.1")
env = append(env, "SERVER_SOFTWARE=Gophi "+Version)
env = append(env, "SERVER_PROTOCOL="+protocol)
env = append(env, "REQUEST_METHOD=GET") // always GET (in HTTP terms anywho)
env = append(env, "CONTENT_LENGTH=0") // always 0
env = append(env, "PATH="+safePath)
env = append(env, "SERVER_NAME="+Hostname)
env = append(env, "SERVER_PORT="+FwdPort)
env = append(env, "SERVER_PORT="+Port)
env = append(env, "DOCUMENT_ROOT="+Root)
env = append(env, "PATH="+safePath)
// Return string slice of environment variables
return env
}
// generateCGIEnv takes a Client, and Request object, the global constant slice and generates a full set of CGI environment variables
func generateCGIEnv(client *Client, request *Request) []string {
// generateCGIEnv takes a Client, and Request object, uses the global constant slice and generates a full set of CGI environment variables
func generateCGIEnv(client *Client, request *Request, pathInfo string) []string {
// Append
env := append(cgiEnv, "REMOTE_ADDR="+client.IP())
env = append(env, "QUERY_STRING="+request.Params())
env = append(env, "QUERY_STRING="+request.Query())
env = append(env, "SCRIPT_NAME="+request.Path().Relative())
env = append(env, "SCRIPT_FILENAME="+request.Path().Absolute())
env = append(env, "SELECTOR="+request.Path().Selector())
env = append(env, "REQUEST_URI="+request.Path().Selector())
env = append(env, "PATH_INFO="+pathInfo)
env = appendCgiEnv(client, request, env)
return env
}
// executeCGIScriptNoHTTP executes a CGI script, responding with output to client without stripping HTTP headers
func executeCGIScriptNoHTTP(client *Client, request *Request) Error {
return execute(client.Conn().Writer(), request.Path(), generateCGIEnv(client, request))
}
// TryExecuteCGIScript attempts to execute supplied CGI script, finding shortest valid path and setting PATH_INFO accordingly
func TryExecuteCGIScript(client *Client, request *Request) error {
// Get relative path with CGI dir stripped
partial := request.Path().Relative()[len(cgiPath.Relative()):]
if len(partial) == 0 {
return ErrRestrictedPath.Extendf("%s is CGI dir", request.Path().Selector())
}
partial = partial[1:]
// Start with the CGI dir path
nextPath := cgiPath
// Get index of next '/' and resize view of partial
next := strings.IndexByte(partial, '/')
// If there's no slashes, we try straight-off with
// original request
if next == -1 {
stat, err := StatFile(request.Path())
switch {
case err != nil:
return err.(errors.Error).Extend("CGI error")
case !stat.Mode().IsRegular():
return ErrFileType.Extendf("%s CGI error", request.Path().Absolute())
}
return ExecuteCGIScript(client, request, "")
}
// Loop to find first valid path
for next != -1 {
// Join this path segment to current cgiPath
nextPath = nextPath.JoinPathUnsafe(partial[:next])
// Update view of partial
partial = partial[next+1:]
// executeCGIScriptStripHTTP executes a CGI script, responding with output to client, stripping HTTP headers and handling status code
func executeCGIScriptStripHTTP(client *Client, request *Request) Error {
// Create new httpStripWriter
httpWriter := newhttpStripWriter(client.Conn().Writer())
// Check file exists (and not a dir!)
stat, err := StatFile(nextPath)
if err == nil && stat.Mode().IsRegular() {
// Set the updated Path
request.path = nextPath
// Begin executing script
err := execute(httpWriter, request.Path(), generateCGIEnv(client, request))
// Try execute!
return ExecuteCGIScript(client, request, partial)
}
// Parse HTTP headers (if present). Return error or continue letting output of script -> client
cgiStatusErr := httpWriter.FinishUp()
if cgiStatusErr != nil {
return cgiStatusErr
// Get next '/' position
next = strings.IndexByte(partial, '/')
}
return err
// No CGI script was found, return not-found error
return ErrFileStat.Extendf("%s CGI error", request.Path().Absolute())
}
// execute executes something at Path, with supplied environment and outputting to writer
func execute(writer io.Writer, p *Path, env []string) Error {
// ExecuteCGIScript executes a CGI script, responding with stdout to client
func ExecuteCGIScript(client *Client, request *Request, pathInfo string) error {
// Create cmd object
cmd := exec.Command(p.Absolute())
// Set new process group id
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
cmd := exec.Command(request.Path().Absolute())
// Setup cmd environment
cmd.Env, cmd.Dir = env, p.Root()
cmd.Env = generateCGIEnv(client, request, pathInfo)
cmd.Dir = request.Path().Root()
// Setup cmd out writer
cmd.Stdout = writer
cmd.Stdout = client.Conn().Writer()
// Not interested in err
cmd.Stderr = nil
@ -96,246 +109,29 @@ func execute(writer io.Writer, p *Path, env []string) Error {
// Start executing
err := cmd.Start()
if err != nil {
return WrapError(CGIStartErr, err)
return errors.With(err).WrapWithin(ErrCGIStart).Extend(request.Path().Absolute())
}
// Setup goroutine to kill cmd after maxCGIRunTime
go func() {
// At least let the script try to finish...
time.Sleep(maxCGIRunTime)
// We've already finished
if cmd.ProcessState != nil {
return
}
// NOTE: we don't set a max CGI script run time anymore,
// we let the connection write deadline catch any long-running
// scripts not sending content.
// Get process group id
pgid, err := syscall.Getpgid(cmd.Process.Pid)
if err != nil {
SystemLog.Fatal(pgidNotFoundErrStr)
}
// Kill process group!
err = syscall.Kill(-pgid, syscall.SIGTERM)
if err != nil {
SystemLog.Fatal(pgidStopErrStr, pgid, err.Error())
}
}()
// Wait for command to finish, get exit code
// Wait for command to finish
err = cmd.Wait()
exitCode := 0
if err != nil {
// Error, try to get exit code
exitError, ok := err.(*exec.ExitError)
if ok {
waitStatus := exitError.Sys().(syscall.WaitStatus)
exitCode = waitStatus.ExitStatus()
} else {
// Attempt to get exit code from error
var exitCode int
switch err.(type) {
case *exec.ExitError:
exitCode = err.(*exec.ExitError).Sys().(syscall.WaitStatus).ExitStatus()
default:
exitCode = 1
}
} else {
// No error! Get exit code directly from command process state
waitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus)
exitCode = waitStatus.ExitStatus()
}
// Non-zero exit code? Return error
if exitCode != 0 {
SystemLog.Error(cgiExecuteErrStr, p.Absolute(), exitCode)
return NewError(CGIExitCodeErr)
// Log and return
return ErrCGIExitCode.Extendf("%s: %d", request.Path().Absolute(), exitCode)
}
// Exit fine!
return nil
}
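// Hedged sketch (illustration only): since Go 1.12 the exit-code handling above can be
// written without the syscall.WaitStatus assertions, which also keeps it portable.
// Assumes the usual os and os/exec imports; exitCodeOf is a hypothetical helper name.
func exitCodeOf(waitErr error, state *os.ProcessState) int {
	if waitErr == nil {
		// No error: read the code straight from the process state
		return state.ExitCode()
	}
	if exitErr, ok := waitErr.(*exec.ExitError); ok {
		return exitErr.ExitCode()
	}
	// Not an ExitError (e.g. an I/O failure): report a generic failure code
	return 1
}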
// httpStripWriter wraps a writer, reading HTTP headers and parsing the status code before deciding whether to continue writing
type httpStripWriter struct {
writer io.Writer
skipBuffer []byte
skipIndex int
err Error
// writeFunc is a pointer to the current underlying write function
writeFunc func(*httpStripWriter, []byte) (int, error)
}
// newhttpStripWriter returns a new httpStripWriter wrapping supplied writer
func newhttpStripWriter(w io.Writer) *httpStripWriter {
return &httpStripWriter{
w,
make([]byte, httpPrefixBufSize),
0,
nil,
writeCheckForHeaders,
}
}
// addToSkipBuffer adds supplied bytes to the skip buffer, returning number added
func (w *httpStripWriter) addToSkipBuffer(data []byte) int {
// Figure out amount to add
toAdd := len(w.skipBuffer) - w.skipIndex
if len(data) < toAdd {
toAdd = len(data)
}
// Add data to skip buffer, return added
copy(w.skipBuffer[w.skipIndex:], data[:toAdd])
w.skipIndex += toAdd
return toAdd
}
// parseHTTPHeaderSection checks if we've received a valid HTTP header section, and determines if we should continue writing
func (w *httpStripWriter) parseHTTPHeaderSection() (bool, bool) {
validHeaderSection, shouldContinue := false, true
for _, header := range strings.Split(string(w.skipBuffer), "\r\n") {
header = strings.ToLower(header)
// Try look for status header
lenBefore := len(header)
header = strings.TrimPrefix(header, "status:")
if len(header) < lenBefore {
// Ensure no spaces + just number
header = strings.Split(header, " ")[0]
// Ignore 200
if header == "200" {
continue
}
// Any other value indicates error, should not continue
shouldContinue = false
// Parse error code
code := CGIStatusUnknownErr
switch header {
case "400":
code = CGIStatus400Err
case "401":
code = CGIStatus401Err
case "403":
code = CGIStatus403Err
case "404":
code = CGIStatus404Err
case "408":
code = CGIStatus408Err
case "410":
code = CGIStatus410Err
case "500":
code = CGIStatus500Err
case "501":
code = CGIStatus501Err
case "503":
code = CGIStatus503Err
}
// Set error code
w.err = NewError(code)
continue
}
// Found a content-type header, this is a valid header section
if strings.Contains(header, "content-type:") {
validHeaderSection = true
}
}
return validHeaderSection, shouldContinue
}
// writeSkipBuffer writes contents of skipBuffer to the underlying writer if necessary
func (w *httpStripWriter) writeSkipBuffer() (bool, error) {
// Defer resetting skipIndex
defer func() {
w.skipIndex = 0
}()
// First try parse the headers, determine next steps
validHeaders, shouldContinue := w.parseHTTPHeaderSection()
// Valid headers received, don't bother writing. Return the shouldContinue value
if validHeaders {
return shouldContinue, nil
}
// Default is to write skip buffer contents, shouldContinue only means something with valid headers
_, err := w.writer.Write(w.skipBuffer[:w.skipIndex])
return true, err
}
func (w *httpStripWriter) FinishUp() Error {
// If skipIndex not zero, try write (or at least parse and see if we need
// to write) remaining skipBuffer. (e.g. if CGI output very short)
if w.skipIndex > 0 {
w.writeSkipBuffer()
}
// Return error if set
return w.err
}
func (w *httpStripWriter) Write(b []byte) (int, error) {
// Write using currently set write function
return w.writeFunc(w, b)
}
// writeRegular performs the regular write; it is a direct wrapper around the underlying writer
func writeRegular(w *httpStripWriter, b []byte) (int, error) {
return w.writer.Write(b)
}
// writeCheckForHeaders reads input data, checking for headers to add to skip buffer and parse before continuing
func writeCheckForHeaders(w *httpStripWriter, b []byte) (int, error) {
split := bytes.Split(b, []byte("\r\n\r\n"))
if len(split) == 1 {
// No header terminator found yet, try to add data to skip buffer
added := w.addToSkipBuffer(b)
if added < len(b) {
defer func() {
// Having written skip buffer, defer resetting write function
w.writeFunc = writeRegular
}()
doContinue, err := w.writeSkipBuffer()
if !doContinue {
return len(b), io.EOF
} else if err != nil {
return added, err
}
// Write remaining data not added to skip buffer
count, err := w.writer.Write(b[added:])
if err != nil {
return added + count, err
}
}
return len(b), nil
}
defer func() {
// No use for skip buffer after below, set write to regular
w.writeFunc = writeRegular
}()
// Try add what we can to skip buffer
added := w.addToSkipBuffer(append(split[0], []byte("\r\n\r\n")...))
// Write skip buffer data if necessary, check if we should continue
doContinue, err := w.writeSkipBuffer()
if !doContinue {
return len(b), io.EOF
} else if err != nil {
return added, err
}
// Write remaining data not added to skip buffer, to writer
count, err := w.writer.Write(b[added:])
if err != nil {
return added + count, err
}
return len(b), nil
}
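// Hedged usage sketch (illustration only): roughly how the strip-writer above is driven.
// CGI output is written through it, any leading HTTP-style header block is buffered and
// parsed, and FinishUp reports a non-200 Status header as an error once the script ends.
func exampleStripCGIOutput(dst io.Writer, scriptOutput []byte) error {
	w := newhttpStripWriter(dst)
	// io.EOF here is the writer's signal to stop, not a real failure
	if _, err := w.Write(scriptOutput); err != nil && err != io.EOF {
		return err
	}
	// FinishUp flushes any short, still-buffered output and returns the
	// parsed CGI status error, if one was found
	return w.FinishUp()
}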

@ -7,26 +7,30 @@ import (
// Client holds onto an open Conn to a client, along with connection information
type Client struct {
cn *conn
ip *net.IP
conn *conn
ip string
port string
}
// NewClient returns a new client based on supplied net.TCPConn
func NewClient(conn *net.TCPConn) *Client {
func NewClient(conn net.Conn) *Client {
addr, _ := conn.RemoteAddr().(*net.TCPAddr)
ip, port := &addr.IP, strconv.Itoa(addr.Port)
return &Client{wrapConn(conn), ip, port}
ip, port := addr.IP.String(), strconv.Itoa(addr.Port)
return &Client{
conn: wrapConn(conn),
ip: ip,
port: port,
}
}
// Conn returns the underlying conn
func (c *Client) Conn() *conn {
return c.cn
return c.conn
}
// IP returns the client's IP string
func (c *Client) IP() string {
return c.ip.String()
return c.ip
}
// Port returns the client's connected port
@ -36,10 +40,10 @@ func (c *Client) Port() string {
// LogInfo logs to the global access logger with the client IP as a prefix
func (c *Client) LogInfo(fmt string, args ...interface{}) {
AccessLog.Info("("+c.ip.String()+") "+fmt, args...)
AccessLog.Infof("("+c.ip+") "+fmt, args...)
}
// LogError logs to the global access logger with the client IP as a prefix
func (c *Client) LogError(fmt string, args ...interface{}) {
AccessLog.Error("("+c.ip.String()+") "+fmt, args...)
AccessLog.Errorf("("+c.ip+") "+fmt, args...)
}

@ -0,0 +1,294 @@
package core
import (
"fmt"
"os"
"os/signal"
"os/user"
"strconv"
"sync"
"syscall"
"time"
"github.com/grufwub/go-bufpools"
"github.com/grufwub/go-config"
"github.com/grufwub/go-filecache"
log "github.com/grufwub/go-logger"
)
func usage(code int) {
fmt.Printf("Usage: %s [-v|--version] [-c|--config $file]\n", os.Args[0])
os.Exit(code)
}
// ParseConfigAndSetup parses necessary core server config from file (and any others defined), and sets up the core ready for Start() to be called
func ParseConfigAndSetup(tree config.Tree, proto string, defaultPort uint, newListener func() (*Listener, error), fileContent func(*Path) FileContent, dirHandler func(*Client, *os.File, *Path) error, largeHandler func(*Client, *os.File, *Path) error, appendCgi func(*Client, *Request, []string) []string) {
// Default configuration file location
configFile := "/etc/gophi." + proto + ".conf"
// If we have arguments to handle, do so!
if len(os.Args) > 1 {
switch os.Args[1] {
case "-c", "--config":
if len(os.Args) != 3 {
usage(1)
}
configFile = os.Args[2]
case "-v", "--version":
fmt.Printf("Gophi (%s) %s\n", proto, Version)
os.Exit(0)
default:
usage(1)
}
}
// Core configuration
tree.StringVar(&Root, "root", "/var/"+proto)
tree.StringVar(&Bind, "listen", "")
tree.StringVar(&Hostname, "hostname", "")
port := tree.Uint64("port", uint64(defaultPort))
chroot := tree.String("chroot", "")
username := tree.String("user", "")
groupname := tree.String("group", "")
// Filesystem configuration
fReadBuf := tree.Uint64("filesystem.read-buf", 1024)
tree.DurationVar(&monitorSleepTime, "filesystem.cache.monitor-freq", time.Second*60)
cacheMax := tree.Float64("filesystem.cache.file-max", 1.0)
cacheSize := tree.Uint64("filesystem.cache.size", 100)
cacheAgeMax := tree.Duration("filesystem.cache.age-max", time.Minute*5)
// Request mapping, hiding, restricting
restrictedPathsList := tree.StringArray("requests.restrict", []string{})
hiddenPathsList := tree.StringArray("requests.hidden", []string{})
remapRequestsList := tree.StringArray("requests.remap", []string{})
// Logging configuration
sysLog := tree.String("log.system", "stdout")
accLog := tree.String("log.access", "stdout")
// Connection configuration
tree.DurationVar(&connReadDeadline, "connection.read-timeout", time.Second*5)
tree.DurationVar(&connWriteDeadline, "connection.write-timeout", time.Second*15)
cWriteBuf := tree.Uint64("connection.write-buf", 1024)
cReadMax := tree.Uint64("connection.read-max", 1024)
// CGI configuration
cgiDir := tree.String("cgi.directory", "")
safePath := tree.String("cgi.safe-path", "/bin:/usr/bin")
// User space configuration
userSpacesEnabled := tree.Bool("user-spaces", false)
// Parse provided config file
tree.Parse(configFile)
// Setup loggers
SystemLog = setupLogger(*sysLog)
if *sysLog == *accLog {
AccessLog = SystemLog
} else {
AccessLog = setupLogger(*accLog)
}
// Check valid values for BindAddr and Hostname
if Hostname == "" {
if Bind == "" {
SystemLog.Fatal("At least one of 'hostname' or 'listen' must be non-empty!")
}
Hostname = Bind
}
// Check valid root (i.e. not empty!)
if Root == "" {
SystemLog.Fatal("No server root directory supplied!")
}
// Set port info
Port = strconv.Itoa(int(*port))
// Set protocol string (only really used by CGI and one call to SystemLog)
protocol = proto
// Setup listener BEFORE entering chroot
// in case TLS cert+key needs to be read
var err error
serverListener, err = newListener()
if err != nil {
SystemLog.Fatalf("Failed to start listener on %s://%s:%s (%s:%s) - %s", protocol, Hostname, Port, Bind, Port, err.Error())
}
// Setup the sync pools
// NOTE:
// - this must be done early on before Root is used so we can
// sanitize it first
// - cReadMax is +2 because it accounts for \r\n line-end
connRequestBufferPool = bufpools.NewBufferPool(int(*cReadMax + 2))
connBufferedWriterPool = bufpools.NewBufferedWriterPool(int(*cWriteBuf))
fileBufferedReaderPool = bufpools.NewBufferedReaderPool(int(*fReadBuf))
fileBufferPool = bufpools.NewBufferPool(int(*fReadBuf))
pathBuilderPool = &sync.Pool{
New: func() interface{} {
return newPathBuilder()
},
}
// - if a username is supplied, look it up and set uid (and gid).
// - if a groupname is supplied, look it up and set gid.
// - with a username but no groupname, the user's primary gid is used
var uid, gid int
if *username != "" {
u, err := user.Lookup(*username)
if err != nil {
SystemLog.Fatalf("Error looking up user: %s", err.Error())
}
uid, _ = strconv.Atoi(u.Uid)
gid, _ = strconv.Atoi(u.Gid)
}
if *groupname != "" {
g, err := user.LookupGroup(*groupname)
if err != nil {
SystemLog.Fatalf("Error looking up group: %s", err.Error())
}
gid, _ = strconv.Atoi(g.Gid)
}
// If chroot provided, change to this!
if *chroot != "" {
err := syscall.Chroot(*chroot)
if err != nil {
SystemLog.Fatalf("Error chrooting into directory: %s", err.Error())
}
SystemLog.Infof("Chrooting into dir: %s", *chroot)
// Ensure we're at root of chroot
err = os.Chdir("/")
if err != nil {
SystemLog.Fatalf("Error entering server directory: %s", err.Error())
}
}
// Sanitize the root path! sanitizePath always returns
// a relative path, so we check beforehand whether we
// need to restore it to an absolute path afterwards
isAbs := (Root[0] == '/')
Root = sanitizePath(Root)
if isAbs {
Root = "/" + Root
}
// Change to server root
err = os.Chdir(Root)
if err != nil {
SystemLog.Fatalf("Error entering server directory: %s", err.Error())
}
SystemLog.Infof("Entered server dir: %s", Root)
// If supplied a group, change to the requested group
if *groupname != "" {
err := syscall.Setgid(gid)
if err != nil {
SystemLog.Fatalf("Error performing setgid: %s", err.Error())
}
SystemLog.Infof("Running as group: %s", *groupname)
}
// If supplied a user, switch to the requested user
if *username != "" {
err := syscall.Setuid(uid)
if err != nil {
SystemLog.Fatalf("Error performing setuid: %s", err.Error())
}
SystemLog.Infof("Running as user: %s", *username)
}
// Check not running as root
if syscall.Geteuid() == 0 || syscall.Getegid() == 0 {
SystemLog.Fatal("Gophi does not support running as root!")
}
// FileSystemObject (and related) setup
fileSizeMax = int64(1048576.0 * *cacheMax) // gets megabytes value in bytes
FileCache = filecache.NewFileCache(int(*cacheSize), *cacheAgeMax)
// If no restricted paths provided, set to the disabled function. Else, compile and enable
if len(*restrictedPathsList) == 0 {
SystemLog.Info("Path restrictions disabled")
IsRestrictedPath = isRestrictedPathDisabled
} else {
SystemLog.Info("Path restrictions enabled")
restrictedPaths = compileRestrictedPathsRegex(*restrictedPathsList)
IsRestrictedPath = isRestrictedPathEnabled
}
// If no hidden paths provided, set to the disabled function. Else, compile and enable
if len(*hiddenPathsList) == 0 {
SystemLog.Info("Path hiding disabled")
IsHiddenPath = isHiddenPathDisabled
} else {
SystemLog.Info("Path hiding enabled")
hiddenPaths = compileHiddenPathsRegex(*hiddenPathsList)
IsHiddenPath = isHiddenPathEnabled
}
// If no remapped paths provided, set to the disabled function. Else, compile and enable
if len(*remapRequestsList) == 0 {
SystemLog.Info("Request remapping disabled")
RemapRequest = remapRequestDisabled
} else {
SystemLog.Info("Request remapping enabled")
requestRemaps = compileRequestRemapRegex(*remapRequestsList)
RemapRequest = remapRequestEnabled
}
// If no CGI dir supplied, set to disabled function. Else, compile and enable
if *cgiDir == "" {
SystemLog.Info("CGI script support disabled")
WithinCGIDir = withinCGIDirDisabled
} else {
SystemLog.Info("CGI script support enabled")
SystemLog.Infof("CGI safe path: %s", *safePath)
cgiPath = NewSanitizedPathAtRoot(Root, *cgiDir)
cgiDirRegex = compileCGIRegex(cgiPath.Relative())
cgiEnv = setupInitialCGIEnv(*safePath)
WithinCGIDir = withinCGIDirEnabled
}
// Set appropriate Path builder function depending
// on whether user spaces enabled or disabled
if *userSpacesEnabled {
SystemLog.Info("User spaces support enabled")
BuildPath = buildPathUserSpacesEnabled
SystemLog.Info("User space directory: public_" + protocol)
} else {
SystemLog.Info("User spaces support disabled")
BuildPath = buildPathUserSpacesDisabled
}
// Set provided protocol specific functions
newFileContent = fileContent
handleDirectory = dirHandler
handleLargeFile = largeHandler
appendCgiEnv = appendCgi
// Setup signal channel
sigChannel = make(chan os.Signal)
signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
}
func setupLogger(output string) *log.SLogger {
switch output {
case "stdout":
return log.NewSLogger(os.Stdout, true)
case "stderr":
return log.NewSLogger(os.Stderr, true)
case "null":
return log.NewSLogger(&log.NilWriter{}, true)
default:
file, err := os.OpenFile(output, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatalf("Error opening log output %s: %s", output, err.Error())
}
return log.NewSLogger(file, true)
}
}
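// Hedged sketch (illustration only, not part of the changeset): roughly how a protocol
// package is expected to drive this setup. The protocol name "gopher", port 70, the
// stubbed handlers and the net/os/config imports are assumptions for illustration;
// only the ParseConfigAndSetup signature above is taken from the source.
func exampleSetup(tree config.Tree) {
	ParseConfigAndSetup(
		tree,
		"gopher", // protocol name: used for /etc/gophi.<proto>.conf and public_<proto>
		70,       // default port
		func() (*Listener, error) {
			// Called after config parsing, so Bind and Port are already set
			l, err := net.Listen("tcp", Bind+":"+Port)
			if err != nil {
				return nil, err
			}
			return NewListener(l), nil
		},
		func(p *Path) FileContent { return &RegularFileContent{} },
		func(c *Client, f *os.File, p *Path) error { return nil }, // directory handler (stub)
		func(c *Client, f *os.File, p *Path) error { return c.Conn().ReadFrom(f) }, // large files
		func(c *Client, r *Request, env []string) []string { return env }, // no extra CGI vars
	)
}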

@ -2,36 +2,30 @@ package core
import (
"bufio"
"bytes"
"io"
"net"
"time"
)
var (
// connReadDeadline specifies the connection read deadline
connReadDeadline time.Duration
// connWriteDeadline specifies the connection write deadline
connWriteDeadline time.Duration
// connReadMax specifies the connection read max (in bytes)
connReadMax int
"github.com/grufwub/go-errors"
)
// deadlineConn wraps net.Conn to set the read / write deadlines on each access
type deadlineConn struct {
conn net.Conn
rd *time.Duration
wd *time.Duration
}
// Read wraps the underlying net.Conn read function, setting read deadline on each access
func (c *deadlineConn) Read(b []byte) (int, error) {
c.conn.SetReadDeadline(time.Now().Add(connReadDeadline))
c.conn.SetReadDeadline(time.Now().Add(*c.rd))
return c.conn.Read(b)
}
// Write wraps the underlying net.Conn write function, setting write deadline on each access
func (c *deadlineConn) Write(b []byte) (int, error) {
c.conn.SetWriteDeadline(time.Now().Add(connWriteDeadline))
c.conn.SetWriteDeadline(time.Now().Add(*c.wd))
return c.conn.Write(b)
}
@ -42,66 +36,94 @@ func (c *deadlineConn) Close() error {
// Conn wraps a DeadlineConn with a buffer
type conn struct {
br *bufio.Reader
b []byte
bw *bufio.Writer
cl io.Closer
c net.Conn
}
// wrapConn wraps a net.Conn in deadlineConn, then within conn and returns the result
func wrapConn(c net.Conn) *conn {
deadlineConn := &deadlineConn{c}
deadlineConn := &deadlineConn{
conn: c,
rd: &connReadDeadline,
wd: &connWriteDeadline,
}
return &conn{
br: connBufferedReaderPool.Get(deadlineConn),
b: connRequestBufferPool.Get(),
bw: connBufferedWriterPool.Get(deadlineConn),
cl: deadlineConn,
c: c,
}
}
// Conn returns the underlying net.Conn
func (c *conn) Conn() net.Conn {
return c.c
}
// ReadLine reads a single line and returns the result, or nil and error
func (c *conn) ReadLine() ([]byte, Error) {
// return slice
var b []byte
// Read! Use this method so we can
// ensure we don't perform some insanely
// long read
for len(b) < connReadMax {
// read the line
line, isPrefix, err := c.br.ReadLine()
func (c *conn) ReadLine() ([]byte, error) {
totalCount, end, emptyRead := 0, -1, 0
for {
// Perform a single read into the buffer
count, err := c.c.Read(c.b[totalCount:])
if err != nil {
return nil, WrapError(ConnReadErr, err)
return nil, errors.With(err).WrapWithin(ErrConnRead)
}
// append line contents to return slice
b = append(b, line...)
// Handle empty reads...
if count < 1 {
// After too many empty reads just return error
if !(emptyRead < 100) {
return nil, errors.With(io.ErrNoProgress).WrapWithin(ErrConnRead)
}
// Iterate empty read counter
emptyRead++
continue
}
// if finished reading, break out
if !isPrefix {
// Only accept up to new-line char
end = bytes.IndexByte(c.b[totalCount:totalCount+count], '\n')
if end != -1 {
// Drop any extra '\r'
if end > 0 && c.b[end-1] == '\r' {
end--
}
// Iterate total count up to the new-line
totalCount += end
break
}
}
return b, nil
// Iter total count
totalCount += count
// If we have hit read max, return error
if totalCount >= len(c.b) {
return nil, ErrInvalidRequest
}
}
return c.b[:totalCount], nil
}
// WriteBytes writes a byte slice to the buffer and returns error status
func (c *conn) Write(b []byte) Error {
func (c *conn) Write(b []byte) error {
_, err := c.bw.Write(b)
if err != nil {
return WrapError(ConnWriteErr, err)
return errors.With(err).WrapWithin(ErrConnWrite)
}
return nil
}
// ReadFrom writes to the buffer from a reader and returns error status
func (c *conn) ReadFrom(r io.Reader) Error {
func (c *conn) ReadFrom(r io.Reader) error {
// Since this buffer wraps deadlineConn, which DOES NOT have
// a ReadFrom method implemented, it will force the buffer to
// use its own internal byte buffer along with the deadlineConn's
// Write implementation (forcing the deadline to be regularly updated)
_, err := c.bw.ReadFrom(r)
if err != nil {
return WrapError(ConnWriteErr, err)
return errors.With(err).WrapWithin(ErrConnWrite)
}
return nil
}
@ -113,21 +135,18 @@ func (c *conn) Writer() io.Writer {
// Close flushes the underlying buffer, closes the conn then puts
// the sync.Pool conn buffers back
func (c *conn) Close() Error {
func (c *conn) Close() error {
// Flush + close
err1 := c.bw.Flush()
err2 := c.cl.Close()
c.bw.Flush()
err := c.c.Close()
// Put buffers back
connBufferedReaderPool.Put(c.br)
connBufferedWriterPool.Put(c.bw)
connRequestBufferPool.Put(c.b)
// If either errors, wrap. Else return none
if err2 != nil {
return WrapError(ConnCloseErr, err2)
}
if err1 != nil {
return WrapError(ConnWriteErr, err1)
// Return error (if exists)
if err != nil {
return errors.With(err).WrapWithin(ErrConnClose)
}
return nil
}
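// Hedged sketch (illustration only): the expected lifecycle of a wrapped connection,
// assuming the buffer pools have already been initialized by ParseConfigAndSetup.
// exampleServeConn and the response bytes are hypothetical.
func exampleServeConn(nc net.Conn) error {
	c := wrapConn(nc)
	defer c.Close() // flushes the write buffer and returns pooled buffers

	// Read the single request line, capped at connection.read-max bytes
	line, err := c.ReadLine()
	if err != nil {
		return err
	}
	_ = line // ...parse the request and build a response here...
	return c.Write([]byte("response\r\n"))
}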

@ -1,153 +1,29 @@
package core
// ErrorCode specifies types of errors for later identification
type ErrorCode int
import (
"github.com/grufwub/go-errors"
)
// Core ErrorCodes
const (
ConnWriteErr ErrorCode = -1
ConnReadErr ErrorCode = -2
ConnCloseErr ErrorCode = -3
ListenerResolveErr ErrorCode = -4
ListenerBeginErr ErrorCode = -5
ListenerAcceptErr ErrorCode = -6
InvalidIPErr ErrorCode = -7
InvalidPortErr ErrorCode = -8
MutexUpgradeErr ErrorCode = -9
MutexDowngradeErr ErrorCode = -10
FileOpenErr ErrorCode = -11
FileStatErr ErrorCode = -12
FileReadErr ErrorCode = -13
FileTypeErr ErrorCode = -14
DirectoryReadErr ErrorCode = -15
RestrictedPathErr ErrorCode = -16
InvalidRequestErr ErrorCode = -17
CGIStartErr ErrorCode = -18
CGIExitCodeErr ErrorCode = -19
CGIStatus400Err ErrorCode = -20
CGIStatus401Err ErrorCode = -21
CGIStatus403Err ErrorCode = -22
CGIStatus404Err ErrorCode = -23
CGIStatus408Err ErrorCode = -24
CGIStatus410Err ErrorCode = -25
CGIStatus500Err ErrorCode = -26
CGIStatus501Err ErrorCode = -27
CGIStatus503Err ErrorCode = -28
CGIStatusUnknownErr ErrorCode = -29
var (
ErrConnWrite = errors.BaseError("conn write error")
ErrConnRead = errors.BaseError("conn read error")
ErrConnClose = errors.BaseError("conn close error")
ErrListenerAccept = errors.BaseError("listener accept")
ErrMutexUpgrade = errors.BaseError("mutex upgrade fail")
ErrMutexDowngrade = errors.BaseError("mutex downgrade fail")
ErrFileOpen = errors.BaseError("file open error")
ErrFileStat = errors.BaseError("file stat error")
ErrFileRead = errors.BaseError("file read error")
ErrFileType = errors.BaseError("unsupported file type")
ErrDirectoryRead = errors.BaseError("directory read error")
ErrRestrictedPath = errors.BaseError("restricted path")
ErrUnescapingHost = errors.BaseError("unescaping host")
ErrUnescapingPath = errors.BaseError("unescaping path")
ErrParsingScheme = errors.BaseError("scheme parse fail")
ErrParsingHost = errors.BaseError("host parse fail")
ErrParsingURI = errors.BaseError("URI parse fail")
ErrInvalidRequest = errors.BaseError("invalid request")
ErrCGIStart = errors.BaseError("CGI start error")
ErrCGIExitCode = errors.BaseError("CGI non-zero exit code")
)
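// Hedged sketch (illustration only): how these base errors are used elsewhere in this
// changeset. Only calls that already appear in the diff (With, WrapWithin, Extendf)
// are shown; exampleWrap itself is a hypothetical helper.
func exampleWrap(p *Path, osErr error) error {
	if osErr != nil {
		// Attach the underlying OS error to the package-level base error
		return errors.With(osErr).WrapWithin(ErrFileOpen)
	}
	// Base errors can also be extended with request-specific detail
	return ErrRestrictedPath.Extendf("%s is restricted", p.Selector())
}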
// Error specifies error interface with identifiable ErrorCode
type Error interface {
Code() ErrorCode
Error() string
}
// getExtendedErrorMessage converts an ErrorCode to string message
var getExtendedErrorMessage func(ErrorCode) string
// getErrorMessage converts an ErrorCode to string message first checking internal codes, next user supplied
func getErrorMessage(code ErrorCode) string {
switch code {
case ConnWriteErr:
return connWriteErrStr
case ConnReadErr:
return connReadErrStr
case ConnCloseErr:
return connCloseErrStr
case ListenerResolveErr:
return listenerResolveErrStr
case ListenerBeginErr:
return listenerBeginErrStr
case ListenerAcceptErr:
return listenerAcceptErrStr
case InvalidIPErr:
return invalidIPErrStr
case InvalidPortErr:
return invalidPortErrStr
case MutexUpgradeErr:
return mutexUpgradeErrStr
case MutexDowngradeErr:
return mutexDowngradeErrStr
case FileOpenErr:
return fileOpenErrStr
case FileStatErr:
return fileStatErrStr
case FileReadErr:
return fileReadErrStr
case FileTypeErr:
return fileTypeErrStr
case DirectoryReadErr:
return directoryReadErrStr
case RestrictedPathErr:
return restrictedPathErrStr
case InvalidRequestErr:
return invalidRequestErrStr
case CGIStartErr:
return cgiStartErrStr
case CGIExitCodeErr:
return cgiExitCodeErrStr
case CGIStatus400Err:
return cgiStatus400ErrStr
case CGIStatus401Err:
return cgiStatus401ErrStr
case CGIStatus403Err:
return cgiStatus403ErrStr
case CGIStatus404Err:
return cgiStatus404ErrStr
case CGIStatus408Err:
return cgiStatus408ErrStr
case CGIStatus410Err:
return cgiStatus410ErrStr
case CGIStatus500Err:
return cgiStatus500ErrStr
case CGIStatus501Err:
return cgiStatus501ErrStr
case CGIStatus503Err:
return cgiStatus503ErrStr
case CGIStatusUnknownErr:
return cgiStatusUnknownErrStr
default:
return getExtendedErrorMessage(code)
}
}
// regularError simply holds an ErrorCode
type regularError struct {
code ErrorCode
}
// Error returns the error string for the underlying ErrorCode
func (e *regularError) Error() string {
return getErrorMessage(e.code)
}
// Code returns the underlying ErrorCode
func (e *regularError) Code() ErrorCode {
return e.code
}
// NewError returns a new Error based on supplied ErrorCode
func NewError(code ErrorCode) Error {
return &regularError{code}
}
// wrappedError wraps an existing error with new ErrorCode
type wrappedError struct {
code ErrorCode
err error
}
// Error returns the error string for underlying error and set ErrorCode
func (e *wrappedError) Error() string {
return getErrorMessage(e.code) + " - " + e.err.Error()
}
// Code returns the underlying ErrorCode
func (e *wrappedError) Code() ErrorCode {
return e.code
}
// WrapError returns a new Error based on supplied error and ErrorCode
func WrapError(code ErrorCode, err error) Error {
return &wrappedError{code, err}
}

@ -1,80 +0,0 @@
package core
import (
"os"
"time"
)
// isGeneratedType just checks if a file's contents implementation is generatedFileContents
func isGeneratedType(f *file) bool {
switch f.contents.(type) {
case *generatedFileContents:
return true
default:
return false
}
}
// file provides a structure for managing a cached file including freshness, last refresh time etc
type file struct {
contents FileContents
lastRefresh int64
isFresh bool
UpgradeableMutex
}
// newFile returns a new File based on supplied FileContents
func newFile(contents FileContents) *file {
return &file{
contents,
0,
true,
UpgradeableMutex{},
}
}
// IsFresh returns files freshness status
func (f *file) IsFresh() bool {
return f.isFresh
}
// SetFresh sets the file as fresh
func (f *file) SetFresh() {
f.isFresh = true
}
// SetUnfresh sets the file as unfresh
func (f *file) SetUnfresh() {
f.isFresh = false
}
// LastRefresh gets the time in nanoseconds of last refresh
func (f *file) LastRefresh() int64 {
return f.lastRefresh
}
// UpdateRefreshTime updates the lastRefresh time to the current time in nanoseconds
func (f *file) UpdateRefreshTime() {
f.lastRefresh = time.Now().UnixNano()
}
// CacheContents caches the file contents using the supplied file descriptor
func (f *file) CacheContents(fd *os.File, path *Path) Error {
f.contents.Clear()
// Load the file contents into cache
err := f.contents.Load(fd, path)
if err != nil {
return err
}
// Set the cache freshness
f.UpdateRefreshTime()
f.SetFresh()
return nil
}
// WriteToClient writes the cached file contents to the supplied client
func (f *file) WriteToClient(client *Client, path *Path) Error {
return f.contents.WriteToClient(client, path)
}

@ -0,0 +1,34 @@
package core
import (
"os"
)
// FileContent provides an interface for caching, rendering and getting cached contents of a file
type FileContent interface {
Load(*Path, *os.File) error
WriteToClient(*Client, *Path) error
Clear()
}
// RegularFileContent is the simplest implementation of core.FileContents for regular files
type RegularFileContent struct {
content []byte
}
// Load takes an open FD and loads the file contents into FileContents memory
func (fc *RegularFileContent) Load(p *Path, file *os.File) error {
var err error
fc.content, err = ReadFile(file)
return err
}
// WriteToClient writes the current contents of FileContents to the client
func (fc *RegularFileContent) WriteToClient(client *Client, p *Path) error {
return client.Conn().Write(fc.content)
}
// Clear empties currently cached FileContents memory
func (fc *RegularFileContent) Clear() {
fc.content = nil
}
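// Hedged sketch (illustration only, not part of the changeset): a protocol-specific
// renderer only has to satisfy the three-method FileContent interface above. A
// hypothetical in-memory implementation:
type staticFileContent struct {
	data []byte
}

// Load does nothing: the contents are provided up front
func (fc *staticFileContent) Load(p *Path, file *os.File) error { return nil }

// WriteToClient writes the static contents to the client
func (fc *staticFileContent) WriteToClient(client *Client, p *Path) error {
	return client.Conn().Write(fc.data)
}

// Clear drops the held contents
func (fc *staticFileContent) Clear() { fc.data = nil }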

@ -1,48 +0,0 @@
package core
import "os"
// FileContents provides an interface for caching, rendering and getting cached contents of a file
type FileContents interface {
WriteToClient(*Client, *Path) Error
Load(*os.File, *Path) Error
Clear()
}
// generatedFileContents is a simple FileContents implementation for holding onto a generated (virtual) file contents
type generatedFileContents struct {
content []byte
}
// WriteToClient writes the generated file contents to the client
func (fc *generatedFileContents) WriteToClient(client *Client, path *Path) Error {
return client.Conn().Write(fc.content)
}
// Load does nothing
func (fc *generatedFileContents) Load(fd *os.File, path *Path) Error { return nil }
// Clear does nothing
func (fc *generatedFileContents) Clear() {}
// RegularFileContents is the simplest implementation of core.FileContents for regular files
type RegularFileContents struct {
contents []byte
}
// WriteToClient writes the current contents of FileContents to the client
func (fc *RegularFileContents) WriteToClient(client *Client, path *Path) Error {
return client.Conn().Write(fc.contents)
}
// Load takes an open FD and loads the file contents into FileContents memory
func (fc *RegularFileContents) Load(fd *os.File, path *Path) Error {
var err Error
fc.contents, err = FileSystem.ReadFile(fd)
return err
}
// Clear empties currently cached FileContents memory
func (fc *RegularFileContents) Clear() {
fc.contents = nil
}

@ -1,139 +1,51 @@
package core
import (
"bytes"
"io"
"os"
"sort"
"time"
)
var (
// FileReadBufSize is the file read buffer size
fileReadBufSize int
// MonitorSleepTime is the duration the goroutine should periodically sleep before running file cache freshness checks
monitorSleepTime time.Duration
// FileSizeMax is the maximum file size that is allowed to be cached
fileSizeMax int64
// FileSystem is the global FileSystem object
FileSystem *FileSystemObject
// userDir is the set subdir name to be looked for under user's home folders
userDir string
"github.com/grufwub/go-errors"
)
// FileSystemObject holds onto an LRUCacheMap and manages access to it, handles freshness checking and multi-threading
type FileSystemObject struct {
cache *lruCacheMap
UpgradeableMutex
}
// NewFileSystemObject returns a new FileSystemObject
func newFileSystemObject(size int) *FileSystemObject {
return &FileSystemObject{
newLRUCacheMap(size),
UpgradeableMutex{},
}
}
// StartMonitor starts the FileSystemObject freshness check monitor in its own goroutine
func (fs *FileSystemObject) StartMonitor() {
for {
// Sleep to not take up all the precious CPU time :)
time.Sleep(monitorSleepTime)
// Check file cache freshness
fs.checkCacheFreshness()
}
}
// checkCacheFreshness iterates through FileSystemObject's cache and checks each file for freshness
func (fs *FileSystemObject) checkCacheFreshness() {
// Before anything get cache lock
fs.Lock()
fs.cache.Iterate(func(path string, f *file) {
// If this is a generated file we skip
if isGeneratedType(f) {
return
}
// Check file still exists on disk
stat, err := os.Stat(path)
if err != nil {
SystemLog.Error(cacheFileStatErrStr, path)
fs.cache.Remove(path)
return
}
// Get last mod time and check freshness
lastMod := stat.ModTime().UnixNano()
if f.IsFresh() && f.LastRefresh() < lastMod {
f.SetUnfresh()
}
})
// Done! Unlock (:
fs.Unlock()
}
// OpenFile opens a file for reading (read-only, world-readable)
func (fs *FileSystemObject) OpenFile(p *Path) (*os.File, Error) {
fd, err := os.OpenFile(p.Absolute(), os.O_RDONLY, 0444)
func OpenFile(p *Path) (*os.File, error) {
file, err := os.OpenFile(p.Absolute(), os.O_RDONLY, 0444)
if err != nil {
return nil, WrapError(FileOpenErr, err)
return nil, errors.With(err).WrapWithin(ErrFileOpen)
}
return fd, nil
return file, nil
}
// StatFile performs a file stat on a file at path
func (fs *FileSystemObject) StatFile(p *Path) (os.FileInfo, Error) {
func StatFile(p *Path) (os.FileInfo, error) {
stat, err := os.Stat(p.Absolute())
if err != nil {
return nil, WrapError(FileStatErr, err)
return nil, errors.With(err).WrapWithin(ErrFileStat)
}
return stat, nil
}
// ReadFile reads a supplied file descriptor into a return byte slice, or error
func (fs *FileSystemObject) ReadFile(fd *os.File) ([]byte, Error) {
// Return slice
ret := make([]byte, 0)
func ReadFile(file *os.File) ([]byte, error) {
// Get read buffers, defer putting back
br := fileBufferedReaderPool.Get(fd)
br := fileBufferedReaderPool.Get(file)
defer fileBufferedReaderPool.Put(br)
// Read through file until null bytes / error
for {
// Read line
line, err := br.ReadBytes('\n')
if err != nil {
if err == io.EOF {
// EOF, add current to return slice and
// break-out. Will not have hit delim
ret = append(ret, line...)
break
} else {
// Bad error, return
return nil, WrapError(FileReadErr, err)
}
}
// Add current line to return slice
ret = append(ret, line...)
// Read the file into a new buffer and try to return this
buf := &bytes.Buffer{}
_, err := br.WriteTo(buf)
if err != nil {
return nil, errors.With(err).WrapWithin(ErrFileRead)
}
// Return!
return ret, nil
return buf.Bytes(), nil
}
// ScanFile scans a supplied file descriptor, using the iterator function
func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Error {
func ScanFile(file *os.File, iterator func(string) bool) error {
// Get read buffer, defer putting back
br := fileBufferedReaderPool.Get(fd)
br := fileBufferedReaderPool.Get(file)
defer fileBufferedReaderPool.Put(br)
// Iterate through file!
@ -148,7 +60,7 @@ func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Er
break
} else {
// Bad error, return
return WrapError(FileReadErr, err)
return errors.With(err).WrapWithin(ErrFileRead)
}
}
@ -164,187 +76,35 @@ func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Er
}
// ScanDirectory reads the contents of a directory and performs the iterator function on each os.FileInfo entry returned
func (fs *FileSystemObject) ScanDirectory(fd *os.File, p *Path, iterator func(os.FileInfo, *Path)) Error {
dirList, err := fd.Readdir(-1)
func ScanDirectory(dir *os.File, p *Path, iterator func(os.FileInfo, *Path)) error {
nameList, err := dir.Readdirnames(-1)
if err != nil {
return WrapError(DirectoryReadErr, err)
return errors.With(err).WrapWithin(ErrDirectoryRead)
}
// Sort by name
sort.Sort(byName(dirList))
sort.Strings(nameList)
// Walk through the directory list using supplied iterator function
for _, info := range dirList {
for _, name := range nameList {
// Make new Path object
fp := p.JoinPath(info.Name())
fp := p.JoinPathUnsafe(name)
// Skip restricted files
if IsRestrictedPath(fp) || IsHiddenPath(fp) || WithinCGIDir(fp) {
if IsRestrictedPath(fp) || IsHiddenPath(fp) {
continue
}
// Perform iterator
iterator(info, p.JoinPath(info.Name()))
}
return nil
}
// AddGeneratedFile adds a generated file content byte slice to the file cache, with supplied path as the key
func (fs *FileSystemObject) AddGeneratedFile(p *Path, b []byte) {
// Get write lock, defer unlock
fs.Lock()
defer fs.Unlock()
// Create new generatedFileContents
contents := &generatedFileContents{b}
// Wrap contents in File
file := newFile(contents)
// Add to cache!
fs.cache.Put(p.Absolute(), file)
}
// HandleClient handles a Client, attempting to serve their request from the filesystem whether a regular file, gophermap, dir listing or CGI script
func (fs *FileSystemObject) HandleClient(client *Client, request *Request, newFileContents func(*Path) FileContents, handleDirectory func(*FileSystemObject, *Client, *os.File, *Path) Error) Error {
// If restricted, return error
if IsRestrictedPath(request.Path()) {
return NewError(RestrictedPathErr)
}
// Try remap request, log if so
ok := RemapRequest(request)
if ok {
client.LogInfo(requestRemappedStr, request.Path().Selector(), request.Params())
}
// First check for file on disk
fd, err := fs.OpenFile(request.Path())
if err != nil {
// Get read-lock, defer unlock
fs.RLock()
defer fs.RUnlock()
// Don't throw in the towel yet! Check for generated file in cache
file, ok := fs.cache.Get(request.Path().Absolute())
if !ok {
return err
}
// We got a generated file! Close and send as-is
return file.WriteToClient(client, request.Path())
}
defer fd.Close()
// Get stat
stat, goErr := fd.Stat()
if goErr != nil {
// Unlock, return error
fs.RUnlock()
return WrapError(FileStatErr, goErr)
}
switch {
// Directory
case stat.Mode()&os.ModeDir != 0:
// Don't support CGI script dir enumeration
if WithinCGIDir(request.Path()) {
return NewError(RestrictedPathErr)
}
// Else enumerate dir
return handleDirectory(fs, client, fd, request.Path())
// Regular file
case stat.Mode()&os.ModeType == 0:
// Execute script if within CGI dir
if WithinCGIDir(request.Path()) {
return ExecuteCGIScript(client, request)
}
// Else just fetch
return fs.FetchFile(client, fd, stat, request.Path(), newFileContents)
// Unsupported type
default:
return NewError(FileTypeErr)
}
}
// FetchFile attempts to fetch a file from the cache, using the supplied file stat, Path and serving client. Returns Error status
func (fs *FileSystemObject) FetchFile(client *Client, fd *os.File, stat os.FileInfo, p *Path, newFileContents func(*Path) FileContents) Error {
// If file too big, write direct to client
if stat.Size() > fileSizeMax {
return client.Conn().ReadFrom(fd)
}
// Get cache read lock, defer unlock
fs.RLock()
defer fs.RUnlock()
// Now check for file in cache
f, ok := fs.cache.Get(p.Absolute())
if !ok {
// Create new file contents with supplied function
contents := newFileContents(p)
// Wrap contents in file
f = newFile(contents)
// Cache the file contents
err := f.CacheContents(fd, p)
// Get stat or continue
stat, err := StatFile(fp)
if err != nil {
// Unlock, return error
return err
}
// Try upgrade our lock, else error out (have to remember to unlock!!)
if !fs.UpgradeLock() {
fs.Unlock()
return NewError(MutexUpgradeErr)
}
// Put file in cache
fs.cache.Put(p.Absolute(), f)
// Try downgrade our lock, else error out (have to remember to runlock!!)
if !fs.DowngradeLock() {
fs.RUnlock()
return NewError(MutexDowngradeErr)
SystemLog.Error("Error during dir scan:", err.Error())
continue
}
// Get file read lock
f.RLock()
} else {
// Get file read lock
f.RLock()
// Check for file freshness
if !f.IsFresh() {
// Try upgrade file lock, else error out (have to remember to unlock!!)
if !f.UpgradeLock() {
f.Unlock()
return NewError(MutexUpgradeErr)
}
// Refresh file contents
err := f.CacheContents(fd, p)
if err != nil {
// Unlock file, return error
f.Unlock()
return err
}
// Try downgrade file lock, else error out (have to remember to runlock!!)
if !f.DowngradeLock() {
f.RUnlock()
return NewError(MutexDowngradeErr)
}
}
// Perform iterator
iterator(stat, fp)
}
// Defer file read unlock, write to client
defer f.RUnlock()
return f.WriteToClient(client, p)
return nil
}
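// Hedged sketch (illustration only): how a protocol's directory handler might drive
// ScanDirectory. The tab-separated listing format is invented for illustration; only
// the ScanDirectory signature above is taken from the source.
func exampleListDirectory(client *Client, dir *os.File, p *Path) error {
	listing := make([]byte, 0, 512)
	err := ScanDirectory(dir, p, func(info os.FileInfo, fp *Path) {
		// One line per visible (non-hidden, non-restricted) entry
		listing = append(listing, (info.Name() + "\t" + fp.Selector() + "\r\n")...)
	})
	if err != nil {
		return err
	}
	return client.Conn().Write(listing)
}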

@ -1,18 +0,0 @@
package core
var (
// Root stores the server's root directory
Root string
// Bind stores the server's bound IP
Bind string
// Hostname stores the host's outward hostname
Hostname string
// Port stores the internal port the host is bound to
Port string
// FwdPort stores the host's outward port number
FwdPort string
)

@ -1,37 +1,26 @@
package core
import "net"
import (
"net"
// serverListener holds the global Listener object
var serverListener *listener
"github.com/grufwub/go-errors"
)
// listener wraps a net.TCPListener to return our own clients on each Accept()
type listener struct {
l *net.TCPListener
// Listener wraps a net.Listener to return our own clients on each Accept()
type Listener struct {
l net.Listener
}
// NewListener returns a new Listener or Error
func newListener(ip, port string) (*listener, Error) {
// Try resolve provided ip and port details
laddr, err := net.ResolveTCPAddr("tcp", ip+":"+port)
if err != nil {
return nil, WrapError(ListenerResolveErr, err)
}
// Create listener!
l, err := net.ListenTCP("tcp", laddr)
if err != nil {
return nil, WrapError(ListenerBeginErr, err)
}
return &listener{l}, nil
// NewListener returns a new Listener object wrapping a net.Listener
func NewListener(l net.Listener) *Listener {
return &Listener{l}
}
// Accept accepts a new connection and returns a client, or error
func (l *listener) Accept() (*Client, Error) {
conn, err := l.l.AcceptTCP()
func (l *Listener) Accept() (*Client, error) {
conn, err := l.l.Accept()
if err != nil {
return nil, WrapError(ListenerAcceptErr, err)
return nil, errors.With(err).WrapWithin(ErrListenerAccept)
}
return NewClient(conn), nil
}
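// Hedged sketch (illustration only): the accept loop a protocol server would run on top
// of this Listener. The handle callback is hypothetical; Accept and SystemLog.Errorf
// are taken from this changeset.
func exampleAcceptLoop(l *Listener, handle func(*Client)) {
	for {
		client, err := l.Accept()
		if err != nil {
			// Log and keep accepting rather than tearing the server down
			SystemLog.Errorf("%s", err.Error())
			continue
		}
		go handle(client)
	}
}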

@ -1,92 +0,0 @@
package core
import (
"log"
"os"
)
var (
// AccessLog holds a global access LogObject
AccessLog loggerInterface
// SystemLog holds a global system LogObject
SystemLog loggerInterface
)
func setupLogger(output string) loggerInterface {
switch output {
case "stdout":
return &stdLogger{}
case "null":
return &nullLogger{}
default:
fd, err := os.OpenFile(output, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatalf(logOutputErrStr, output, err.Error())
}
return &logger{log.New(fd, "", log.LstdFlags)}
}
}
// LoggerInterface specifies an interface that can log different message levels
type loggerInterface interface {
Info(string, ...interface{})
Error(string, ...interface{})
Fatal(string, ...interface{})
}
// StdLogger implements LoggerInterface to log to output using regular log
type stdLogger struct{}
// Info logs to log.Logger with info level prefix
func (l *stdLogger) Info(fmt string, args ...interface{}) {
log.Printf(":: I :: "+fmt, args...)
}
// Error logs to log.Logger with error level prefix
func (l *stdLogger) Error(fmt string, args ...interface{}) {
log.Printf(":: E :: "+fmt, args...)
}
// Fatal logs to standard log with fatal prefix and terminates program
func (l *stdLogger) Fatal(fmt string, args ...interface{}) {
log.Fatalf(":: F :: "+fmt, args...)
}
// logger implements LoggerInterface to log to output using underlying log.Logger
type logger struct {
lg *log.Logger
}
// Info logs to log.Logger with info level prefix
func (l *logger) Info(fmt string, args ...interface{}) {
l.lg.Printf("I :: "+fmt, args...)
}
// Error logs to log.Logger with error level prefix
func (l *logger) Error(fmt string, args ...interface{}) {
l.lg.Printf("E :: "+fmt, args...)
}
// Fatal logs to log.Logger with fatal prefix and terminates program
func (l *logger) Fatal(fmt string, args ...interface{}) {
l.lg.Fatalf("F :: "+fmt, args...)
}
// nullLogger implements LoggerInterface to do absolutely fuck-all
type nullLogger struct{}
// Info does nothing
func (l *nullLogger) Info(fmt string, args ...interface{}) {
// do nothing
}
// Error does nothing
func (l *nullLogger) Error(fmt string, args ...interface{}) {
// do nothing
}
// Fatal simply terminates the program
func (l *nullLogger) Fatal(fmt string, args ...interface{}) {
os.Exit(1)
}

@ -1,68 +0,0 @@
package core
import (
"sync"
"sync/atomic"
"time"
)
// UpgradeableMutex wraps a RWMutex and provides safe upgrading / downgrading
// by informing you whether a write lock was achieved in the brief swap time
type UpgradeableMutex struct {
wLast int64
internal sync.RWMutex
}
// RLock exactly wraps the internal RWMutex
func (mu *UpgradeableMutex) RLock() {
mu.internal.RLock()
}
// RUnlock exactly wraps the internal RWMutex
func (mu *UpgradeableMutex) RUnlock() {
mu.internal.RUnlock()
}
// Lock wraps the internal RWMutex, atomically storing the last write-lock time
func (mu *UpgradeableMutex) Lock() {
mu.internal.Lock()
atomic.StoreInt64(&mu.wLast, time.Now().UnixNano())
}
// Unlock exactly wraps the internal RWMutex
func (mu *UpgradeableMutex) Unlock() {
mu.internal.Unlock()
}
// safeSwap stores the current time, performs the swap function and checks if
// any write locks were achieved during the swap function
func (mu *UpgradeableMutex) safeSwap(swapFn func()) bool {
// Get the 'now' time
now := time.Now().UnixNano()
// Store now time
atomic.StoreInt64(&mu.wLast, now)
// Perform the swap
swapFn()
// Successful swap determined by if last write-lock
// is still equal to 'now'
return atomic.LoadInt64(&mu.wLast) == now
}
// UpgradeLock upgrades a read to a write lock, returning success state as a bool
func (mu *UpgradeableMutex) UpgradeLock() bool {
return mu.safeSwap(func() {
mu.internal.RUnlock()
mu.internal.Lock()
})
}
// DowngradeLock downgrades a write to a read lock, returning success state as a bool
func (mu *UpgradeableMutex) DowngradeLock() bool {
return mu.safeSwap(func() {
mu.internal.Unlock()
mu.internal.RLock()
})
}
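// Hedged usage sketch (illustration only): the intended read -> upgrade -> write ->
// downgrade pattern, mirroring the file-cache code in this changeset. Note that a
// failed UpgradeLock still leaves the caller holding the write lock (and a failed
// DowngradeLock the read lock), so the matching unlock must still be issued.
func exampleUpgrade(mu *UpgradeableMutex, needsWrite func() bool, write func()) bool {
	mu.RLock()
	if !needsWrite() {
		mu.RUnlock()
		return true
	}
	if !mu.UpgradeLock() {
		// Another writer got in during the swap; state may have changed
		mu.Unlock()
		return false
	}
	write()
	if !mu.DowngradeLock() {
		mu.RUnlock()
		return false
	}
	mu.RUnlock()
	return true
}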

@ -1,10 +1,14 @@
package core
import (
"os/user"
"path"
"strings"
)
// BuildPath is the global Path builder function for a supplied raw (relative) path
var BuildPath func(string) *Path
// Path safely holds a file path
type Path struct {
root string // root dir
@ -13,18 +17,53 @@ type Path struct {
}
// NewPath returns a new Path structure based on supplied root and relative path
func NewPath(root, rel string) *Path {
func newPath(root, rel string) *Path {
return &Path{root, rel, formatSelector(rel)}
}
// newSanitizedPath returns a new sanitized Path structure based on root and relative path
func newSanitizedPath(root, rel string) *Path {
return NewPath(root, sanitizeRawPath(root, rel))
// NewSanitizedPathAtRoot returns a new sanitized Path structure based on root and relative path
func NewSanitizedPathAtRoot(root, rel string) *Path {
return newPath(root, sanitizePath(rel))
}
// buildPathUserSpacesEnabled will attempt to parse a username, and return a sanitized Path at the username's
// public server dir. Else, returns sanitized Path at server root
func buildPathUserSpacesEnabled(rawPath string) *Path {
if strings.HasPrefix(rawPath, "/~") {
// Get username and raw path
username, path := SplitByBefore(rawPath[2:], "/")
// See if this user exists, get their home directory
user, err := user.Lookup(username)
if err != nil {
return NewSanitizedPathAtRoot(Root, rawPath)
}
// Generate user public server root
userRoot := joinSanitizedPaths(user.HomeDir, "public_"+protocol)
// Return sanitized path using user home dir as root
return NewSanitizedPathAtRoot(userRoot, path)
}
// Return sanitized path at server root
return NewSanitizedPathAtRoot(Root, rawPath)
}
// buildPathUserSpacesDisabled always returns a sanitized Path at server root
func buildPathUserSpacesDisabled(rawPath string) *Path {
return NewSanitizedPathAtRoot(Root, rawPath)
}
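// Hedged illustration (not part of the changeset), assuming the protocol is "gopher"
// and a user "kim" with home directory /home/kim exists:
//
//   BuildPath("/~kim/notes.txt").Absolute() -> "/home/kim/public_gopher/notes.txt"
//   BuildPath("/notes.txt").Absolute()      -> Root joined with "notes.txt"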
// Remap remaps a Path to a new relative path, keeping previous selector
func (p *Path) Remap(newRel string) {
p.rel = sanitizeRawPath(p.root, newRel)
p.rel = sanitizePath(newRel)
}
// RemapDirect remaps a Path to a new absolute path, keeping previous selector
func (p *Path) RemapDirect(newAbs string) {
p.root = newAbs
p.rel = ""
}
// Root returns file's root directory
@ -39,7 +78,7 @@ func (p *Path) Relative() string {
// Absolute returns the absolute path
func (p *Path) Absolute() string {
return path.Join(p.root, p.rel)
return joinSanitizedPaths(p.root, p.rel)
}
// Selector returns the formatted selector path
@ -64,55 +103,217 @@ func (p *Path) Dir() *Path {
// JoinRelative returns a string appended to the current relative path
func (p *Path) JoinRelative(newRel string) string {
return path.Join(p.rel, newRel)
return joinSanitizedPaths(p.rel, sanitizePath(newRel))
}
// JoinPath appends the supplied string to the Path's relative and selector paths
func (p *Path) JoinPath(toJoin string) *Path {
return &Path{p.root, path.Join(p.rel, toJoin), path.Join(p.sel, toJoin)}
// JoinPathUnsafe appends the supplied string to the Path's relative and selector paths. This is unsafe because
// if toJoin contains a back-traversal it can escape the root
func (p *Path) JoinPathUnsafe(toJoin string) *Path {
toJoin = sanitizePath(toJoin)
return &Path{p.root, joinSanitizedPaths(p.rel, toJoin), joinSanitizedPaths(p.sel, toJoin)}
}
// formatSelector formats a relative path to a valid selector path
func formatSelector(rel string) string {
switch len(rel) {
if len(rel) > 0 && rel[0] == '/' {
return rel
}
return "/" + rel
}
// joinSanitizedPaths quickly joins two sanitized paths, and returns either an absolute or relative
// path depending on whether the start string was absolute or relative
func joinSanitizedPaths(start, end string) string {
// Format the end string
endLen := len(end)
switch endLen {
case 0:
return "/"
// do nothing
case 1:
if rel[0] == '.' {
return "/"
// If this is '/', trim!
if end[0] == '/' {
end = ""
}
default:
// Trim any leading '/'
if end[0] == '/' {
end = end[1:]
endLen--
}
return "/" + rel
// Trim any trailing '/'
if end[endLen-1] == '/' {
end = end[:endLen-1]
}
}
// Format the start string and return
switch start {
case "":
return end
case "/":
return start + end
default:
if rel[0] == '/' {
return rel
// Ensure there is a path separator
// between the final path component of
// start, and the beginning of end
if start[len(start)-1] == '/' {
return start + end
}
return "/" + rel
return start + "/" + end
}
}
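// Hedged sketch (illustration only): expected behaviour of the join above, written as a
// throwaway test. The inputs are assumptions; a testing import is assumed.
func TestJoinSanitizedPaths(t *testing.T) {
	cases := map[[2]string]string{
		{"/var/gopher", "docs/index"}: "/var/gopher/docs/index", // absolute start stays absolute
		{"", "docs/index"}:            "docs/index",             // empty start yields a relative path
		{"/", "docs"}:                 "/docs",                  // root start is preserved
	}
	for in, want := range cases {
		if got := joinSanitizedPaths(in[0], in[1]); got != want {
			t.Errorf("joinSanitizedPaths(%q, %q) = %q, want %q", in[0], in[1], got, want)
		}
	}
}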
// sanitizeRawPath takes a root and relative path, and returns a sanitized relative path
func sanitizeRawPath(root, rel string) string {
// Start by cleaning
rel = path.Clean(rel)
// pathBuilder builds relative paths,
// taking backtracks ("..") into account
type pathBuilder struct {
b []byte
starts []int
}
if path.IsAbs(rel) {
// Absolute path, try trimming root and leading '/'
rel = strings.TrimPrefix(strings.TrimPrefix(rel, root), "/")
} else {
// Relative path, if back dir traversal give them server root
if strings.HasPrefix(rel, "..") {
rel = ""
}
// newPathBuilder returns a new pathBuilder
func newPathBuilder() *pathBuilder {
return &pathBuilder{
b: make([]byte, 64)[:0],
starts: make([]int, 10)[:0],
}
}
// split adds a path split to the buffer and
// starts tracking the start of a new path segment
func (pb *pathBuilder) split() {
// Add a '/'
pb.b = append(pb.b, '/')
// Set the next segment start
pb.starts = append(pb.starts, len(pb.b))
}
// append adds a byte to the buffer
func (pb *pathBuilder) append(b byte) {
pb.b = append(pb.b, b)
}
// backtrack removes the latest path element
func (pb *pathBuilder) backtrack() {
// Get segments count
length := len(pb.starts)
// If length = 0, just empty buffer
if length == 0 {
pb.b = pb.b[:0]
return
}
return rel
// Get the last segment start
last := pb.starts[length-1]
// Jump back to last segment
pb.b = pb.b[:last-1]
// Reset to previous segment start
pb.starts = pb.starts[:length-1]
}
// sanitizeUserRoot takes a generated user root directory and sanitizes it, returning a bool as to whether it's safe
func sanitizeUserRoot(root string) (string, bool) {
root = path.Clean(root)
if !strings.HasPrefix(root, "/home/") && strings.HasSuffix(root, "/"+userDir) {
return "", false
// toString gets current path as string, and resets the builder
func (pb *pathBuilder) toString() string {
// Get buffer as string, skip first
// char i.e. '/'
s := string(pb.b)
if len(s) > 0 {
s = s[1:]
}
return root, true
// Reset buffer and list of segment starts
pb.b = pb.b[:0]
pb.starts = pb.starts[:0]
// Return string
return s
}
// sanitizePath sanitizes a raw path and returns a relative path
func sanitizePath(raw string) string {
// Empty path, nothing to do
if raw == "" {
return ""
}
// Get necessary information beforehand
length := len(raw)
pb := pathBuilderPool.Get().(*pathBuilder)
defer pathBuilderPool.Put(pb)
for index := 0; index < length; {
// Path segment separator
if raw[index] == '/' {
index++
continue
}
// Segment starting '.'
if raw[index] == '.' {
// Hit the end of the path, break-out
if index+1 == length {
break
}
// Iter index
index++
// This segment is only '.', continue
if raw[index] == '/' {
index++
continue
}
// If next char is '.'
if raw[index] == '.' {
// Hit end of the path on a '..',
// backtrack and break-out
if index+1 == length {
pb.backtrack()
break
}
// Iter index
index++
// If next char is '/' then this is a
// backtrack segment, backtrack and skip
// to next
if raw[index] == '/' {
pb.backtrack()
continue
}
// Split for next segment
pb.split()
// '..' but no backtrack, add to path
// (next '.' is added below)
pb.append('.')
} else {
// Split for next segment
pb.split()
}
// '.' but no backtrack, add to path
pb.append('.')
} else {
// Split for next segment
pb.split()
}
// Iter through and add up to next '/'
for ; index < length && raw[index] != '/'; index++ {
pb.append(raw[index])
}
// Final iter to skip past the final '/'
index++
}
// Return built string
return pb.toString()
}
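// Hedged sketch (illustration only): the sanitizer resolves "." and ".." segments
// without ever escaping above the implicit root, and always returns a relative path.
// A throwaway test with assumed inputs; testing and sync imports are assumed.
func TestSanitizePath(t *testing.T) {
	// The builder pool is normally initialized by ParseConfigAndSetup
	pathBuilderPool = &sync.Pool{New: func() interface{} { return newPathBuilder() }}
	cases := map[string]string{
		"/../secret":  "secret", // back-traversal above root is dropped
		"a/./b/../c":  "a/c",    // "." and ".." are resolved in place
		"../..":       "",       // nothing left once fully backtracked
		"/gopher/dir": "gopher/dir",
	}
	for in, want := range cases {
		if got := sanitizePath(in); got != want {
			t.Errorf("sanitizePath(%q) = %q, want %q", in, got, want)
		}
	}
}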

@ -1,108 +0,0 @@
package core
import (
"bufio"
"io"
"sync"
)
var (
connBufferedReaderPool *bufferedReaderPool
connBufferedWriterPool *bufferedWriterPool
fileBufferedReaderPool *bufferedReaderPool
fileBufferPool *bufferPool
)
type bufferPool struct {
pool sync.Pool
}
func newBufferPool(size int) *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() interface{} {
return make([]byte, size)
},
},
}
}
func (bp *bufferPool) Get() []byte {
// Just return and cast a buffer
return bp.pool.Get().([]byte)
}
func (bp *bufferPool) Put(b []byte) {
// Just put back in pool
bp.pool.Put(b)
}
type bufferedReaderPool struct {
pool sync.Pool
}
func newBufferedReaderPool(size int) *bufferedReaderPool {
return &bufferedReaderPool{
pool: sync.Pool{
New: func() interface{} {
return bufio.NewReaderSize(nil, size)
},
},
}
}
func (bp *bufferedReaderPool) Get(r io.Reader) *bufio.Reader {
// Get a buffered reader from the pool
br := bp.pool.Get().(*bufio.Reader)
// Reset to use our new reader!
br.Reset(r)
// Return
return br
}
func (bp *bufferedReaderPool) Put(br *bufio.Reader) {
// We must reset again here so the pooled reader
// doesn't keep a reference to the now-unused
// underlying reader and get in the way of GC.
br.Reset(nil)
// Put back in the pool
bp.pool.Put(br)
}
type bufferedWriterPool struct {
pool sync.Pool
}
func newBufferedWriterPool(size int) *bufferedWriterPool {
return &bufferedWriterPool{
pool: sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, size)
},
},
}
}
func (bp *bufferedWriterPool) Get(w io.Writer) *bufio.Writer {
// Get a buffered writer from the pool
bw := bp.pool.Get().(*bufio.Writer)
// Reset to use our new writer
bw.Reset(w)
// Return
return bw
}
func (bp *bufferedWriterPool) Put(bw *bufio.Writer) {
// We must reset again here so the pooled writer
// doesn't keep a reference to the now-unused
// underlying writer and get in the way of GC.
bw.Reset(nil)
// Put back in the pool
bp.pool.Put(bw)
}

@ -1,37 +1,10 @@
package core
import (
"path"
"regexp"
"strings"
)
var (
// cgiDir is a precompiled regex statement to check if a string matches the server's CGI directory
cgiDirRegex *regexp.Regexp
// WithinCGIDir returns whether a path is within the server's specified CGI scripts directory
WithinCGIDir func(*Path) bool
// restrictedPaths is the global slice of restricted paths
restrictedPaths []*regexp.Regexp
// IsRestrictedPath is the global function to check against restricted paths
IsRestrictedPath func(*Path) bool
// hiddenPaths is the global slice of hidden (from dir view) paths
hiddenPaths []*regexp.Regexp
// IsHiddenPath is the global function to check against hidden paths
IsHiddenPath func(*Path) bool
// requestRemaps is the global slice of remapped paths
requestRemaps []*RequestRemap
// RemapRequest is the global function to remap a request
RemapRequest func(*Request) bool
)
// requestRemapSeparatorStr specifies the separator string to recognise in request remaps
const requestRemapSeparatorStr = " -> "
@ -43,73 +16,66 @@ type RequestRemap struct {
// compileCGIRegex takes a supplied string and returns compiled regular expression
func compileCGIRegex(cgiDir string) *regexp.Regexp {
if path.IsAbs(cgiDir) {
if !strings.HasPrefix(cgiDir, Root) {
SystemLog.Fatal(cgiDirOutsideRootStr)
}
} else {
cgiDir = path.Join(Root, cgiDir)
}
SystemLog.Info(cgiDirStr, cgiDir)
return regexp.MustCompile("(?m)" + cgiDir + "(|/.*)$")
SystemLog.Infof("CGI directory: %s", cgiDir)
return regexp.MustCompile("^" + cgiDir + "(/.*)?$")
}
// compileRestrictedPathsRegex turns a string of restricted paths into a slice of compiled regular expressions
func compileRestrictedPathsRegex(restrictions string) []*regexp.Regexp {
func compileRestrictedPathsRegex(restrictions []string) []*regexp.Regexp {
regexes := make([]*regexp.Regexp, 0)
// Split restrictions string by new lines
for _, expr := range strings.Split(restrictions, "\n") {
for _, expr := range restrictions {
// Skip empty expressions
if len(expr) == 0 {
continue
}
// Compile the regular expression
regex, err := regexp.Compile("(?m)" + expr + "$")
regex, err := regexp.Compile("^" + expr + "$")
if err != nil {
SystemLog.Fatal(pathRestrictRegexCompileFailStr, expr)
SystemLog.Fatalf("Failed compiling restricted path regex: %s", expr)
}
// Append compiled regex and log
regexes = append(regexes, regex)
SystemLog.Info(pathRestrictRegexCompiledStr, expr)
SystemLog.Infof("Compiled restricted path regex: %s", expr)
}
return regexes
}
// compileHiddenPathsRegex turns a string of hidden paths into a slice of compiled regular expressions
func compileHiddenPathsRegex(hidden string) []*regexp.Regexp {
func compileHiddenPathsRegex(hidden []string) []*regexp.Regexp {
regexes := make([]*regexp.Regexp, 0)
// Split restrictions string by new lines
for _, expr := range strings.Split(hidden, "\n") {
for _, expr := range hidden {
// Skip empty expressions
if len(expr) == 0 {
continue
}
// Compile the regular expression
regex, err := regexp.Compile("(?m)" + expr + "$")
regex, err := regexp.Compile("^" + expr + "$")
if err != nil {
SystemLog.Fatal(pathHidingRegexCompileFailStr, expr)
SystemLog.Fatalf("Failed compiling hidden path regex: %s", expr)
}
// Append compiled regex and log
regexes = append(regexes, regex)
SystemLog.Info(pathHidingRegexCompiledStr, expr)
SystemLog.Infof("Compiled hidden path regex: %s", expr)
}
return regexes
}
// compil RequestRemapRegex turns a string of remapped paths into a slice of compiled RequestRemap structures
func compileRequestRemapRegex(remaps string) []*RequestRemap {
// compileRequestRemapRegex turns a string of remapped paths into a slice of compiled RequestRemap structures
func compileRequestRemapRegex(remaps []string) []*RequestRemap {
requestRemaps := make([]*RequestRemap, 0)
// Split remaps string by new lines
for _, expr := range strings.Split(remaps, "\n") {
for _, expr := range remaps {
// Skip empty expressions
if len(expr) == 0 {
continue
@ -118,26 +84,26 @@ func compileRequestRemapRegex(remaps string) []*RequestRemap {
// Split into alias and remap
split := strings.Split(expr, requestRemapSeparatorStr)
if len(split) != 2 {
SystemLog.Fatal(requestRemapRegexInvalidStr, expr)
SystemLog.Fatalf("Invalid request remap regex: %s", expr)
}
// Compile the regular expression
regex, err := regexp.Compile("(?m)" + strings.TrimPrefix(split[0], "/") + "$")
regex, err := regexp.Compile("^" + strings.TrimPrefix(split[0], "/") + "$")
if err != nil {
SystemLog.Fatal(requestRemapRegexCompileFailStr, expr)
SystemLog.Fatalf("Failed compiling request remap regex: %s", expr)
}
// Append RequestRemap and log
requestRemaps = append(requestRemaps, &RequestRemap{regex, strings.TrimPrefix(split[1], "/")})
SystemLog.Info(requestRemapRegexCompiledStr, expr)
SystemLog.Infof("Compiled path remap regex: %s", expr)
}
return requestRemaps
}
// withinCGIDirEnabled returns whether a Path's absolute value matches within the CGI dir
// withinCGIDirEnabled returns whether a Path's relative value matches within the CGI dir
func withinCGIDirEnabled(p *Path) bool {
return cgiDirRegex.MatchString(p.Absolute())
return cgiDirRegex.MatchString(p.Relative())
}
// withinCGIDirDisabled always returns false, CGI is disabled
@ -189,11 +155,11 @@ func remapRequestEnabled(request *Request) bool {
raw = remap.Regex.ExpandString(raw, remap.Template, request.Path().Selector(), submatches)
}
// Split to new path and paramters again
path, params := splitBy(string(raw), "?")
// Split to new path and query again
path, query := SplitBy(string(raw), "?")
// Remap request, log, return
request.Remap(path, params)
request.Remap(path, query)
return true
}
return false

@ -1,25 +1,61 @@
package core
// Request is a data structure for storing a filesystem path, and params, parsed from a client's request
// Request is a data structure for storing a filesystem path, and query, parsed from a client's request
type Request struct {
p *Path
params string
path *Path
query string
}
// NewRequest returns a new Request object
func NewRequest(path *Path, query string) *Request {
return &Request{
path: path,
query: query,
}
}
// String returns the full parsed request string (mainly for logging)
func (r *Request) String() string {
query := r.query
if query != "" {
query = "?" + query
}
return r.path.Selector() + query
}
// Path returns the request's associated Path object
func (r *Request) Path() *Path {
return r.p
return r.path
}
// Params returns the request's parameters string
func (r *Request) Params() string {
return r.params
// Query returns the request's query string
func (r *Request) Query() string {
return r.query
}
// Remap modifies a request to use new relative path, and accommodate supplied extra parameters
func (r *Request) Remap(rel, params string) {
if len(r.params) > 0 {
r.params = params + "&" + r.params
// AddToQuery adds provided query to beginning of existing query, formatting as necessary
func (r *Request) AddToQuery(query string) {
// Ensure we have been given query
if len(query) < 1 {
return
}
r.p.Remap(rel)
// Either append or set query
if len(r.query) > 0 {
r.query = query + "&" + r.query
} else {
r.query = query
}
}
// Remap modifies a request to use new relative path, and accommodate supplied extra query if provided
func (r *Request) Remap(rel, query string) {
r.AddToQuery(query)
r.path.Remap(rel)
}
// RemapDirect modifies a request to use new absolute path, and accommodate supplied extra query if provided
func (r *Request) RemapDirect(abs, query string) {
r.AddToQuery(query)
r.path.RemapDirect(abs)
}

@ -1,203 +1,108 @@
package core
import (
"flag"
"fmt"
"os"
"os/signal"
"path"
"strconv"
"strings"
"syscall"
"regexp"
"sync"
"time"
"github.com/grufwub/go-bufpools"
"github.com/grufwub/go-errors"
"github.com/grufwub/go-filecache"
"github.com/grufwub/go-logger"
)
const (
// Version holds the current version string
Version = "v2.0.2-alpha"
Version = "v3.2.0-beta"
)
var (
// SigChannel is the global OS signal channel
sigChannel chan os.Signal
)
// Root stores the server's root directory
Root string
// ParseFlagsAndSetup parses necessary core server flags, and sets up the core ready for Start() to be called
func ParseFlagsAndSetup(proto string, errorMessageFunc func(ErrorCode) string) {
// Setup numerous temporary flag variables, and store the rest
// directly in their final operating location. Strings are stored
// in `string_constants.go` to allow for later localization
sysLog := flag.String(sysLogFlagStr, "stdout", sysLogDescStr)
accLog := flag.String(accLogFlagStr, "stdout", accLogDescStr)
flag.StringVar(&Root, rootFlagStr, "/var/gopher", rootDescStr)
flag.StringVar(&Bind, bindFlagStr, "", bindDescStr)
flag.StringVar(&Hostname, hostnameFlagStr, "localhost", hostnameDescStr)
port := flag.Uint(portFlagStr, 70, portDescStr)
fwdPort := flag.Uint(fwdPortFlagStr, 0, fwdPortDescStr)
flag.DurationVar(&connReadDeadline, readDeadlineFlagStr, time.Duration(time.Second*3), readDeadlineDescStr)
flag.DurationVar(&connWriteDeadline, writeDeadlineFlagStr, time.Duration(time.Second*5), writeDeadlineDescStr)
cReadBuf := flag.Uint(connReadBufFlagStr, 1024, connReadBufDescStr)
cWriteBuf := flag.Uint(connWriteBufFlagStr, 1024, connWriteBufDescStr)
cReadMax := flag.Uint(connReadMaxFlagStr, 4096, connReadMaxDescStr)
fReadBuf := flag.Uint(fileReadBufFlagStr, 1024, fileReadBufDescStr)
flag.DurationVar(&monitorSleepTime, monitorSleepTimeFlagStr, time.Duration(time.Second*1), monitorSleepTimeDescStr)
cacheMax := flag.Float64(cacheFileMaxFlagStr, 1.0, cacheFileMaxDescStr)
cacheSize := flag.Uint(cacheSizeFlagStr, 100, cacheSizeDescStr)
restrictedPathsList := flag.String(restrictPathsFlagStr, "", restrictPathsDescStr)
hiddenPathsList := flag.String(hiddenPathsFlagStr, "", hiddenPathsDescStr)
remapRequestsList := flag.String(remapRequestsFlagStr, "", remapRequestsDescStr)
cgiDir := flag.String(cgiDirFlagStr, "", cgiDirDescStr)
flag.DurationVar(&maxCGIRunTime, maxCGITimeFlagStr, time.Duration(time.Second*3), maxCGITimeDescStr)
safePath := flag.String(safePathFlagStr, "/bin:/usr/bin", safePathDescStr)
httpCompatCGI := flag.Bool(httpCompatCGIFlagStr, false, httpCompatCGIDescStr)
httpPrefixBuf := flag.Uint(httpPrefixBufFlagStr, 1024, httpPrefixBufDescStr)
flag.StringVar(&userDir, userDirFlagStr, "", userDirDescStr)
printVersion := flag.Bool(versionFlagStr, false, versionDescStr)
// Parse flags! (including any set by outer calling function)
flag.Parse()
// If version print requested, do so!
if *printVersion {
fmt.Println("Gophor " + Version)
os.Exit(0)
}
// Bind stores the server's bound IP
Bind string
// Setup loggers
SystemLog = setupLogger(*sysLog)
if *sysLog == *accLog {
AccessLog = SystemLog
} else {
AccessLog = setupLogger(*accLog)
}
// Hostname stores the host's outward hostname
Hostname string
// Check valid values for BindAddr and Hostname
if Hostname == "" {
if Bind == "" {
SystemLog.Fatal(hostnameBindEmptyStr)
}
Hostname = Bind
}
// Port stores the internal port the host is bound to
Port string
// Change to server directory
if osErr := os.Chdir(Root); osErr != nil {
SystemLog.Fatal(chDirErrStr, osErr)
}
SystemLog.Info(chDirStr, Root)
// AccessLog is the global Access SLogger
AccessLog *logger.SLogger
// Set port info
if *fwdPort == 0 {
fwdPort = port
}
Port = strconv.Itoa(int(*port))
FwdPort = strconv.Itoa(int(*fwdPort))
// SystemLog is the global System SLogger
SystemLog *logger.SLogger
// Set protocol string (only really used by CGI and one call to SystemLog)
protocol = proto
// FileCache is the global FileCache object
FileCache *filecache.FileCache
// Setup listener
var err Error
serverListener, err = newListener(Bind, Port)
if err != nil {
SystemLog.Fatal(listenerBeginFailStr, protocol, Hostname, FwdPort, Bind, Port, err.Error())
}
// File system related globals
monitorSleepTime time.Duration
fileSizeMax int64
// Setup the sync pools
connBufferedReaderPool = newBufferedReaderPool(int(*cReadBuf))
connBufferedWriterPool = newBufferedWriterPool(int(*cWriteBuf))
fileBufferedReaderPool = newBufferedReaderPool(int(*fReadBuf))
fileBufferPool = newBufferPool(int(*fReadBuf))
// Global listener
serverListener *Listener
// Conn read max
connReadMax = int(*cReadMax)
// Client connection related globals
connReadDeadline time.Duration
connWriteDeadline time.Duration
// FileSystemObject (and related) setup
fileSizeMax = int64(1048576.0 * *cacheMax) // gets megabytes value in bytes
FileSystem = newFileSystemObject(int(*cacheSize))
// Server protocol
protocol string
// If no restricted paths provided, set to the disabled function. Else, compile and enable
if *restrictedPathsList == "" {
SystemLog.Info(pathRestrictionsDisabledStr)
IsRestrictedPath = isRestrictedPathDisabled
} else {
SystemLog.Info(pathRestrictionsEnabledStr)
restrictedPaths = compileRestrictedPathsRegex(*restrictedPathsList)
IsRestrictedPath = isRestrictedPathEnabled
}
// CGI related globals
cgiPath *Path
cgiEnv []string
// If no hidden paths provided, set to the disabled function. Else, compile and enable
if *hiddenPathsList == "" {
SystemLog.Info(pathHidingDisableStr)
IsHiddenPath = isHiddenPathDisabled
} else {
SystemLog.Info(pathHidingEnabledStr)
hiddenPaths = compileHiddenPathsRegex(*hiddenPathsList)
IsHiddenPath = isHiddenPathEnabled
}
// Global OS signal channel
sigChannel chan os.Signal
// If no remapped paths provided, set to the disabled function. Else, compile and enable
if *remapRequestsList == "" {
SystemLog.Info(requestRemapDisabledStr)
RemapRequest = remapRequestDisabled
} else {
SystemLog.Info(requestRemapEnabledStr)
requestRemaps = compileRequestRemapRegex(*remapRequestsList)
RemapRequest = remapRequestEnabled
}
// Buffer pools
connRequestBufferPool *bufpools.BufferPool
connBufferedReaderPool *bufpools.BufferedReaderPool
connBufferedWriterPool *bufpools.BufferedWriterPool
fileBufferedReaderPool *bufpools.BufferedReaderPool
fileBufferPool *bufpools.BufferPool
pathBuilderPool *sync.Pool
// If no CGI dir supplied, set to disabled function. Else, compile and enable
if *cgiDir == "" {
SystemLog.Info(cgiSupportDisabledStr)
WithinCGIDir = withinCGIDirDisabled
} else {
SystemLog.Info(cgiSupportEnabledStr)
cgiDirRegex = compileCGIRegex(*cgiDir)
cgiEnv = setupInitialCGIEnv(*safePath)
WithinCGIDir = withinCGIDirEnabled
// Enable HTTP compatible CGI scripts, or not
if *httpCompatCGI {
SystemLog.Info(cgiHTTPCompatEnabledStr, httpPrefixBuf)
ExecuteCGIScript = executeCGIScriptStripHTTP
httpPrefixBufSize = int(*httpPrefixBuf)
} else {
ExecuteCGIScript = executeCGIScriptNoHTTP
}
}
// Compiled regex globals
cgiDirRegex *regexp.Regexp
restrictedPaths []*regexp.Regexp
hiddenPaths []*regexp.Regexp
requestRemaps []*RequestRemap
// If no user dir supplied, set to disabled function. Else, set user dir and enable
if userDir == "" {
SystemLog.Info(userDirDisabledStr)
getRequestPath = getRequestPathUserDirDisabled
} else {
SystemLog.Info(userDirEnabledStr)
getRequestPath = getRequestPathUserDirEnabled
// Clean the user dir to be safe
userDir = path.Clean(userDir)
if strings.HasPrefix(userDir, "..") {
SystemLog.Fatal(userDirBackTraverseErrStr, userDir)
} else {
SystemLog.Info(userDirStr, userDir)
}
}
// WithinCGIDir returns whether a path is within the server's specified CGI scripts directory
WithinCGIDir func(*Path) bool
// Set ErrorCode->string function
getExtendedErrorMessage = errorMessageFunc
// appendCgiEnv is the global function set by implementor to append protocol specific CGI information
appendCgiEnv func(*Client, *Request, []string) []string
// Setup signal channel
sigChannel = make(chan os.Signal)
signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
}
// IsRestrictedPath is the global function to check against restricted paths
IsRestrictedPath func(*Path) bool
// IsHiddenPath is the global function to check against hidden paths
IsHiddenPath func(*Path) bool
// RemapRequest is the global function to remap a request
RemapRequest func(*Request) bool
// Global client-handling filesystem functions
newFileContent func(*Path) FileContent
handleDirectory func(*Client, *os.File, *Path) error
handleLargeFile func(*Client, *os.File, *Path) error
)
// Start begins operation of the server
func Start(serve func(*Client)) {
// Start the FileSystemObject cache freshness monitor
SystemLog.Info(cacheMonitorStartStr, monitorSleepTime)
go FileSystem.StartMonitor()
// Start the FileCache freshness monitor
SystemLog.Infof("Starting cache monitor with freq: %s", monitorSleepTime.String())
go FileCache.StartMonitor(monitorSleepTime)
// Start the listener
SystemLog.Info(listeningOnStr, protocol, Hostname, FwdPort, Bind, Port)
SystemLog.Infof("Listening on %s://%s:%s (%s:%s)", protocol, Hostname, Port, Bind, Port)
go func() {
for {
client, err := serverListener.Accept()
@ -214,12 +119,158 @@ func Start(serve func(*Client)) {
}()
// Listen for OS signals and terminate if necessary
listenForOSSignals()
}
// ListenForOSSignals listens for OS signals and terminates the program if necessary
func listenForOSSignals() {
sig := <-sigChannel
SystemLog.Info(signalReceivedStr, sig)
SystemLog.Infof("Signal received: %s. Shutting down...", sig.String())
os.Exit(0)
}
// HandleClient handles a Client, attempting to serve their request from the filesystem whether a regular file, gophermap, dir listing or CGI script
func HandleClient(client *Client, request *Request) error {
// If restricted, return error
if IsRestrictedPath(request.Path()) {
return ErrRestrictedPath.Extend(request.Path().Selector())
}
// Try remap if necessary. If remapped, log!
if ok := RemapRequest(request); ok {
client.LogInfo("Remapped request: %s %s", request.Path().Selector(), request.Query())
}
// If within CGI dir, attempt to execute this!
if WithinCGIDir(request.Path()) {
return TryExecuteCGIScript(client, request)
}
// First check for file on disk
file, err := OpenFile(request.Path())
if err != nil {
// Get read-lock, defer unlock
FileCache.RLock()
defer FileCache.RUnlock()
// Don't throw in the towel yet! Check for generated file in cache
cached, ok := FileCache.Get(request.Path().Absolute())
if !ok {
return err
}
// We got a generated file! Get content
content := cached.Content().(FileContent)
// Send content to client
return content.WriteToClient(client, request.Path())
}
defer file.Close()
// Get stat
stat, err := file.Stat()
if err != nil {
return errors.With(err).WrapWithin(ErrFileStat)
}
switch {
// Directory
case stat.Mode().IsDir():
return handleDirectory(client, file, request.Path())
// Regular file
case stat.Mode().IsRegular():
return FetchFile(client, file, stat, request.Path())
// Unsupported type
default:
return ErrFileType
}
}
// FetchFile attempts to fetch a file from the cache, using the supplied file stat, Path and serving client. Returns Error status
func FetchFile(client *Client, file *os.File, stat os.FileInfo, p *Path) error {
// If file too big, write direct to client
if stat.Size() > fileSizeMax {
return handleLargeFile(client, file, p)
}
// Get cache read lock
FileCache.RLock()
// Define cache here
var content FileContent
// Now check for file in cache
cached, ok := FileCache.Get(p.Absolute())
if !ok {
// Create new file contents with supplied function
content = newFileContent(p)
// Cache the file contents
err := content.Load(p, file)
if err != nil {
// Unlock, return error
FileCache.RUnlock()
return err
}
// Wrap contents in file, set fresh
cached = filecache.NewFile(p.Absolute(), true, content)
cached.UpdateLastRefresh()
// Try upgrade our lock, else error out (have to remember to unlock!!)
if !FileCache.UpgradeLock() {
FileCache.Unlock()
return ErrMutexUpgrade
}
// Put file in cache
FileCache.Put(cached)
// Try downgrade our lock, else error out (have to remember to runlock!!)
if !FileCache.DowngradeLock() {
FileCache.RUnlock()
return ErrMutexDowngrade
}
// Get file read lock
cached.RLock()
} else {
// Get file read lock
cached.RLock()
// Get contents from file
content = cached.Content().(FileContent)
// Check for file freshness
if !cached.IsFresh() {
// Try upgrade file lock, else error out (have to remember to unlock!!)
if !FileCache.UpgradeLock() {
FileCache.Unlock()
return ErrMutexUpgrade
}
// Refresh file contents
err := content.Load(p, file)
if err != nil {
// Unlock file, return error
FileCache.Unlock()
return err
}
// Set file as fresh
cached.UpdateLastRefresh()
// Try downgrade file lock, else error out (have to remember to runlock!!)
if !FileCache.DowngradeLock() {
FileCache.RUnlock()
return ErrMutexDowngrade
}
}
}
// Defer file + cache read unlock
defer func() {
cached.RUnlock()
FileCache.RUnlock()
}()
// Write to client
return content.WriteToClient(client, p)
}

@ -1,162 +0,0 @@
package core
// Core flag string constants
const (
sysLogFlagStr = "sys-log"
sysLogDescStr = "System log output location ['stdout', 'null', $filename]"
accLogFlagStr = "acc-log"
accLogDescStr = "Access log output location ['stdout', 'null', $filename]"
rootFlagStr = "root"
rootDescStr = "Server root directory"
bindFlagStr = "bind"
bindDescStr = "IP address to bind to"
hostnameFlagStr = "hostname"
hostnameDescStr = "Server hostname (FQDN)"
portFlagStr = "port"
portDescStr = "Port to listen on"
fwdPortFlagStr = "fwd-port"
fwdPortDescStr = "Outward-facing port"
readDeadlineFlagStr = "read-deadline"
readDeadlineDescStr = "Connection read deadline (timeout)"
writeDeadlineFlagStr = "write-deadline"
writeDeadlineDescStr = "Connection write deadline (timeout)"
connReadBufFlagStr = "conn-read-buf"
connReadBufDescStr = "Connection read buffer size (bytes)"
connWriteBufFlagStr = "conn-write-buf"
connWriteBufDescStr = "Connection write buffer size (bytes)"
connReadMaxFlagStr = "conn-read-max"
connReadMaxDescStr = "Connection read max (bytes)"
fileReadBufFlagStr = "file-read-buf"
fileReadBufDescStr = "File read buffer size (bytes)"
monitorSleepTimeFlagStr = "cache-monitor-freq"
monitorSleepTimeDescStr = "File cache freshness monitor frequency"
cacheFileMaxFlagStr = "cache-file-max"
cacheFileMaxDescStr = "Max cached file size (megabytes)"
cacheSizeFlagStr = "cache-size"
cacheSizeDescStr = "File cache size"
restrictPathsFlagStr = "restrict-paths"
restrictPathsDescStr = "Restrict paths as new-line separated list of regex statements (see documentation)"
hiddenPathsFlagStr = "hidden-paths"
hiddenPathsDescStr = "Hidden paths as new-line separated list of regex statements (see documentation)"
remapRequestsFlagStr = "remap-requests"
remapRequestsDescStr = "Remap requests as new-line separated list of remap statements (see documentation)"
cgiDirFlagStr = "cgi-dir"
cgiDirDescStr = "CGI scripts directory (empty to disable)"
maxCGITimeFlagStr = "max-cgi-time"
maxCGITimeDescStr = "Max CGI script execution time"
safePathFlagStr = "safe-path"
safePathDescStr = "CGI environment safe PATH variable"
httpCompatCGIFlagStr = "http-compat-cgi"
httpCompatCGIDescStr = "Enable HTTP compatibility for CGI scripts by stripping headers"
httpPrefixBufFlagStr = "http-prefix-buf"
httpPrefixBufDescStr = "Buffer size used for stripping HTTP headers"
userDirFlagStr = "user-dir"
userDirDescStr = "User subdir for personal server space"
versionFlagStr = "version"
versionDescStr = "Print version string"
)
// Log string constants
const (
hostnameBindEmptyStr = "At least one of hostname or bind-addr must be non-empty!"
chDirStr = "Entered server dir: %s"
chDirErrStr = "Error entering server directory: %s"
listenerBeginFailStr = "Failed to start listener on %s://%s:%s (%s:%s) - %s"
listeningOnStr = "Listening on %s://%s:%s (%s:%s)"
cacheMonitorStartStr = "Starting cache monitor with freq: %s"
cacheFileStatErrStr = "Failed to stat file in cache: %s"
pathRestrictionsEnabledStr = "Path restrictions enabled"
pathRestrictionsDisabledStr = "Path restrictions disabled"
pathRestrictRegexCompileFailStr = "Failed compiling restricted path regex: %s"
pathRestrictRegexCompiledStr = "Compiled restricted path regex: %s"
pathHidingEnabledStr = "Path hiding enabled"
pathHidingDisableStr = "Path hiding disabled"
pathHidingRegexCompileFailStr = "Failed compiling hidden path regex: %s"
pathHidingRegexCompiledStr = "Compiled hidden path regex: %s"
requestRemapEnabledStr = "Request remapping enabled"
requestRemapDisabledStr = "Request remapping disabled"
requestRemapRegexInvalidStr = "Invalid request remap regex: %s"
requestRemapRegexCompileFailStr = "Failed compiling request remap regex: %s"
requestRemapRegexCompiledStr = "Compiled path remap regex: %s"
requestRemappedStr = "Remapped request: %s %s"
cgiSupportEnabledStr = "CGI script support enabled"
cgiSupportDisabledStr = "CGI script support disabled"
cgiDirOutsideRootStr = "CGI directory must not be outside server root!"
cgiDirStr = "CGI directory: %s"
cgiHTTPCompatEnabledStr = "CGI HTTP compatibility enabled, prefix buffer: %d"
cgiExecuteErrStr = "Exit executing: %s [%d]"
userDirEnabledStr = "User directory support enabled"
userDirDisabledStr = "User directory support disabled"
userDirBackTraverseErrStr = "User directory with back-traversal not supported: %s"
userDirStr = "User directory: %s"
signalReceivedStr = "Signal received: %v. Shutting down..."
logOutputErrStr = "Error opening log output %s: %s"
pgidNotFoundErrStr = "Process unfinished, PGID not found!"
pgidStopErrStr = "Error stopping process group %d: %s"
connWriteErrStr = "Conn write error"
connReadErrStr = "Conn read error"
connCloseErrStr = "Conn close error"
listenerResolveErrStr = "Listener resolve error"
listenerBeginErrStr = "Listener begin error"
listenerAcceptErrStr = "Listener accept error"
invalidIPErrStr = "Invalid IP"
invalidPortErrStr = "Invalid port"
mutexUpgradeErrStr = "Mutex upgrade fail"
mutexDowngradeErrStr = "Mutex downgrade fail"
fileOpenErrStr = "File open error"
fileStatErrStr = "File stat error"
fileReadErrStr = "File read error"
fileTypeErrStr = "Unsupported file type"
directoryReadErrStr = "Directory read error"
restrictedPathErrStr = "Restricted path"
invalidRequestErrStr = "Invalid request"
cgiStartErrStr = "CGI start error"
cgiExitCodeErrStr = "CGI non-zero exit code"
cgiStatus400ErrStr = "CGI status: 400"
cgiStatus401ErrStr = "CGI status: 401"
cgiStatus403ErrStr = "CGI status: 403"
cgiStatus404ErrStr = "CGI status: 404"
cgiStatus408ErrStr = "CGI status: 408"
cgiStatus410ErrStr = "CGI status: 410"
cgiStatus500ErrStr = "CGI status: 500"
cgiStatus501ErrStr = "CGI status: 501"
cgiStatus503ErrStr = "CGI status: 503"
cgiStatusUnknownErrStr = "CGI status: unknown"
)

@ -0,0 +1,9 @@
// +build linux
package core
import "syscall"
// Temporary check on Linux to ensure
// correct Go version being used
var _ = syscall.AllThreadsSyscall

@ -1,75 +1,271 @@
package core
import (
"net/url"
"path"
"strings"
)
var (
// getRequestPaths points to either of the getRequestPath____ functions
getRequestPath func(string) *Path
"github.com/grufwub/go-errors"
)
// ParseURLEncodedRequest takes a received string and safely parses a request from this
func ParseURLEncodedRequest(received string) (*Request, Error) {
// Check for ASCII control bytes
for i := 0; i < len(received); i++ {
if received[i] < ' ' || received[i] == 0x7f {
return nil, NewError(InvalidRequestErr)
// HasAsciiControlBytes returns whether a string contains ASCII control bytes
func HasAsciiControlBytes(raw string) bool {
for i := 0; i < len(raw); i++ {
if raw[i] < ' ' || raw[i] == 0x7f {
return true
}
}
return false
}
// Split into 2 substrings by '?'. URL path and query
rawPath, params := splitBy(received, "?")
// ParseScheme attempts to parse a scheme from a raw url
func ParseScheme(raw string) (string, string, error) {
// If the first char is:
// - a letter: could be the start of a scheme, keep checking
// - the scheme-terminating ':': empty scheme, return bad request error
// - anything else: not a scheme, return the raw string with no error
c := raw[0]
switch {
case ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'):
// All good, continue
case c == ':':
return "", "", ErrParsingScheme.Extend(raw)
default:
// Invalid scheme char (or scheme first-char) return
return "", raw, nil
}
// Unescape path
rawPath, err := url.PathUnescape(rawPath)
if err != nil {
return nil, WrapError(InvalidRequestErr, err)
// Iterate
for i := 1; i < len(raw); i++ {
c := raw[i]
switch {
case ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') || c == '+' || c == '-' || c == '.':
// Is valid ASCII, do nothing
case c == ':':
// Return the scheme (lowercase)
return strings.ToLower(raw[:i]), raw[i+1:], nil
default:
// Invalid char, return
return "", raw, nil
}
}
return "", raw, nil
}
func isHex(b byte) bool {
return ('a' <= b && b <= 'f') ||
('A' <= b && b <= 'F') ||
('0' <= b && b <= '9')
}
func unHex(b byte) byte {
switch {
case '0' <= b && b <= '9':
return b - '0'
case 'a' <= b && b <= 'f':
return b - 'a' + 10
case 'A' <= b && b <= 'F':
return b - 'A' + 10
default:
return 0
}
}
func shouldEscape(b byte) bool {
// All alphanumeric are unreserved
if 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' || '0' <= b && b <= '9' {
return false
}
switch b {
// Further unreserved
case '-', '_', '.', '~':
return false
// All else should be escaped
default:
return true
}
}
func shouldHostEscape(b byte) bool {
switch b {
// Allowed host sub-delims +
// ':' for port +
// '[]' for ipv6 +
// '<>' only others we can allow (can't be % encoded)
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '<', '>', '"':
return false
// Check all-else
default:
return shouldEscape(b)
}
}
func shouldPathEscape(b byte) bool {
switch b {
// Reserved character in path.
// Bear in mind ';' and ',' ARE allowed in a URL path,
// but when converting from a filesystem-->URL path
// (how this will be used), they will need escaping.
case '?', ';', ',':
return true
// Allowed in path
case '$', '&', '+', '/', ':', '=', '@':
return false
// Check all-else
default:
return shouldEscape(b)
}
}
func unescape(raw string, count int) string {
var t strings.Builder
t.Grow(len(raw) - 2*count)
for i := 0; i < len(raw); i++ {
switch raw[i] {
// Replace % encoded char
case '%':
t.WriteByte(unHex(raw[i+1])<<4 | unHex(raw[i+2]))
i += 2
// Write as-is
default:
t.WriteByte(raw[i])
}
}
return t.String()
}
func unescapeHost(raw string) (string, error) {
// Count all the percent signs
count := 0
for i := 0; i < len(raw); {
switch raw[i] {
case '%':
// Increase count
count++
// If not a valid % encoded hex value, return with error
if i+2 >= len(raw) || !isHex(raw[i+1]) || !isHex(raw[i+2]) {
return "", ErrUnescapingHost.Extend(raw)
}
// In the host component % encoding can only be used
// for non-ASCII bytes. And rfc6874 introduces %25 for
// escaped percent sign in IPv6 literals
if unHex(raw[i+1]) < 8 && raw[i:i+3] != "%25" {
return "", ErrUnescapingHost.Extend(raw)
}
// Skip iteration past the
// hex we just confirmed
i += 3
default:
// If within ASCII range, and should be escaped, return error
if raw[i] < 0x80 && shouldHostEscape(raw[i]) {
return "", ErrUnescapingHost.Extend(raw)
}
// Iter
i++
}
}
// Return new request
return &Request{getRequestPath(rawPath), params}, nil
// No encoding? return as-is. Else, unescape
if count == 0 {
return raw, nil
}
return unescape(raw, count), nil
}
// ParseInternalRequest parses an internal request string based on the current directory
func ParseInternalRequest(p *Path, line string) *Request {
rawPath, params := splitBy(line, "?")
if path.IsAbs(rawPath) {
return &Request{getRequestPath(rawPath), params}
func unescapePath(raw string) (string, error) {
// Count all the percent signs
count := 0
length := len(raw)
for i := 0; i < length; {
switch raw[i] {
case '%':
// Increase count
count++
// If not a valid % encoded hex value, return with error
if i+2 >= length || !isHex(raw[i+1]) || !isHex(raw[i+2]) {
return "", ErrUnescapingPath.Extend(raw)
}
// Skip iteration past the
// hex we just confirmed
i += 3
default:
i++
}
}
// No encoding? return as-is. Else, unescape
if count == 0 {
return raw, nil
}
return &Request{newSanitizedPath(p.Root(), rawPath), params}
return unescape(raw, count), nil
}
// getRequestPathUserDirEnabled creates a Path object from raw path, converting ~USER to user subdirectory roots, else at server root
func getRequestPathUserDirEnabled(rawPath string) *Path {
if userPath := strings.TrimPrefix(rawPath, "/"); strings.HasPrefix(userPath, "~") {
// We found a user path! Split into the user part, and remaining path
user, remaining := splitBy(userPath, "/")
// EscapePath escapes a URL path
func EscapePath(path string) string {
const upperhex = "0123456789ABCDEF"
// Empty user, we been duped! Return server root
if len(user) <= 1 {
return &Path{Root, "", "/"}
count := 0
for i := 0; i < len(path); i++ {
if shouldPathEscape(path[i]) {
count++
}
}
// Get sanitized user root, else return server root
root, ok := sanitizeUserRoot(path.Join("/home", user[1:], userDir))
if !ok {
return &Path{Root, "", "/"}
if count == 0 {
return path
}
sb := strings.Builder{}
sb.Grow(len(path) + 2*count)
for i := 0; i < len(path); i++ {
c := path[i]
if shouldPathEscape(c) {
sb.WriteByte('%')
sb.WriteByte(upperhex[c>>4])
sb.WriteByte(upperhex[c&15])
} else {
sb.WriteByte(c)
}
}
// Build new Path
rel := sanitizeRawPath(root, remaining)
sel := "/~" + user[1:] + formatSelector(rel)
return &Path{root, rel, sel}
return sb.String()
}
// ParseEncodedHost parses encoded host info, safely returning unescape host and port
func ParseEncodedHost(raw string) (string, string, error) {
// Unescape the host info
raw, err := unescapeHost(raw)
if err != nil {
return "", "", err.(errors.Error).WrapWithin(ErrParsingHost)
}
// Return regular server root + rawPath
return newSanitizedPath(Root, rawPath)
// Split by last ':' and return
host, port := SplitByLast(raw, ":")
return host, port, nil
}
// getRequestPathUserDirDisabled creates a Path object from raw path, always at server root
func getRequestPathUserDirDisabled(rawPath string) *Path {
return newSanitizedPath(Root, rawPath)
// ParseEncodedURI parses encoded URI, safely returning unescaped path and still-escaped query
func ParseEncodedURI(received string) (string, string, error) {
// Split into path and query
rawPath, query := SplitBy(received, "?")
// Unescape path, query is up-to CGI scripts
rawPath, err := unescapePath(rawPath)
if err != nil {
return "", "", err.(errors.Error).WrapWithin(ErrParsingURI)
}
// Return the raw path and query
return rawPath, query, nil
}

@ -0,0 +1,67 @@
package core_test
import (
"fmt"
"gophi/core"
"io/ioutil"
"net/url"
"testing"
)
var toEscape = []string{
"",
"abc",
"abc+def",
"a?b",
"one two",
"10%",
" ?&=#+%!<>#\"{}|\\^[]`☺\t:@$'()*,;",
}
var escaped = []string{
"",
"abc",
"abc+def",
"a%3Fb",
"one%20two",
"10%25",
"%20%3F&=%23+%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09:@$%27%28%29%2A%2C%3B",
}
func TestPathEscape(t *testing.T) {
for i, path := range toEscape {
if escapedPath := core.EscapePath(path); escapedPath != escaped[i] {
t.Fatalf("Failed escaping path!\nGot: %s\nExpected: %s\n", escapedPath, escaped[i])
}
}
}
func TestParseEncodedHost(t *testing.T) {
}
func TestParseEncodedURI(t *testing.T) {
}
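// Example (a sketch based on the implementations under test, not part of the
// original suite): ParseEncodedURI unescapes only the path component and
// returns the query still percent-encoded, e.g.
//
//	path, query, err := core.ParseEncodedURI("/some%20dir/file.txt?a=b%20c")
//	// path == "/some dir/file.txt", query == "a=b%20c", err == nil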
func BenchmarkCorePathEscape(b *testing.B) {
var s string
for i := 0; i < b.N; i++ {
for _, path := range toEscape {
s = core.EscapePath(path)
}
}
fmt.Fprint(ioutil.Discard, s)
}
func BenchmarkURLPathEscape(b *testing.B) {
var s string
for i := 0; i < b.N; i++ {
for _, path := range toEscape {
// This isn't *exactly* a fair comparison to core.EscapePath,
// since url.PathEscape() escapes a path _segment_ as opposed
// to an entire path, whereas core.EscapePath() escapes an entire
// filesystem path.
s = url.PathEscape(path)
}
}
fmt.Fprint(ioutil.Discard, s)
}

@ -13,10 +13,42 @@ func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// SplitBy takes an input string and a delimiter, returning the resulting two strings from the split (ALWAYS 2)
func splitBy(input, delim string) (string, string) {
split := strings.SplitN(input, delim, 2)
if len(split) == 2 {
return split[0], split[1]
func SplitBy(input, delim string) (string, string) {
index := strings.Index(input, delim)
if index == -1 {
return input, ""
}
return split[0], ""
return input[:index], input[index+len(delim):]
}
// SplitByBefore takes an input string and a delimiter, returning the resulting two strings from the split, with the delim at the beginning of the 2nd
func SplitByBefore(input, delim string) (string, string) {
index := strings.Index(input, delim)
if index == -1 {
return input, ""
}
return input[:index], input[index:]
}
// SplitByLast takes an input string and a delimiter, returning the resulting two strings from the split at the LAST occurrence
func SplitByLast(input, delim string) (string, string) {
index := strings.LastIndex(input, delim)
if index == -1 {
return input, ""
}
return input[:index], input[index+len(delim):]
}
// FileExt returns the file extension of a file with a supplied path, doesn't include '.'
func FileExt(path string) string {
i := len(path) - 1
for ; i >= 0; i-- {
switch path[i] {
case '/':
return ""
case '.':
return path[i+1:]
}
}
return ""
}

@ -1,46 +1,35 @@
# CGI/1.1 Compliance
The list of environment variables that gophor sets are as follows.
The list of environment variables that gophi sets is as follows.
RFC 3875 standard:
```
# Set
GATEWAY INTERFACE
SERVER_SOFTWARE
SERVER_PROTOCOL
CONTENT_LENGTH
REQUEST_METHOD
SERVER_NAME
SERVER_PORT
REMOTE_ADDR
QUERY_STRING
SCRIPT_NAME
SCRIPT_FILENAME
# NOT set
Env Var | Reasoning
----------------------------------------------
PATH_INFO | This variable can fuck off, having to find the shortest
| valid part of path heirarchy in a URI every single
| CGI request so you can split and set this variable is SO
| inefficient. However, if someone more knowledgeable has
| other opinions or would like to point out where I'm wrong I
| will happily change my tune on this.
PATH_TRANSLATED | See above.
AUTH_TYPE | Until we implement authentication of some kind, ignoring.
CONTENT_TYPE | Very HTTP-centric relying on 'content-type' header.
REMOTE_IDENT | Remote client identity information.
REMOTE_HOST | Basically if the client has a resolving name (not just
| IP), not really necessary.
REMOTE_USER | Remote user id, not used as again no user auth yet.
```
# Both
GATEWAY INTERFACE | "CGI/1.1"
SERVER_SOFTWARE | "gophi" + protocol + version
SERVER_PROTOCOL | protocol
SERVER_NAME | server hostname
SERVER_PORT | server listening port
REMOTE_ADDR | client ip
QUERY_STRING | request query string
SCRIPT_NAME | script relative path
SCRIPT_FILENAME | script absolute path
PATH_INFO | remainder of URI's path after selector
DOCUMENT_ROOT | server root directory
REQUEST_URI | request URI (i.e. selector)
Non-standard:
# Gopher specific
COLUMNS | no. columns server is configured for
```
# Set
SELECTOR
DOCUMENT_ROOT
REQUEST_URI
# Gemini specific
GEMINI_URL | full gemini url (including hostname, port, scheme)
TLS_CIPHER | TLS cipher in use
TLS_VERSION | TLS version in use
AUTH_TYPE | "Certificate" when client certs found
REMOTE_USER | TLS client cert subject common name
TLS_CLIENT_HASH | TLS client cert sha256 hash ("SHA256:______")
TLS_CLIENT_NOT_BEFORE | TLS client cert not before date
TLS_CLIENT_NOT_AFTER | TLS client cert not after date
TLS_CLIENT_ISSUER | TLS client cert issuer
TLS_CLIENT_SUBJECT | TLS client cert subject
TLS_CLIENT_VERIFIED | TLS client cert validity, ("SUCCESS" or "FAIL:<error string>")
```
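As a rough sketch (not part of gophi itself), a CGI script written in Go can read the
variables above straight from its environment; how the response written to stdout must
be framed depends on which protocol you built gophi for, so only the environment lookups
are shown here:
```
package main

import (
	"fmt"
	"os"
)

func main() {
	// Variable names follow the tables above; values are
	// whatever the server set for this request.
	for _, name := range []string{
		"SERVER_NAME", "SERVER_PORT", "REMOTE_ADDR",
		"SCRIPT_NAME", "PATH_INFO", "QUERY_STRING", "DOCUMENT_ROOT",
	} {
		fmt.Printf("%s=%s\n", name, os.Getenv(name))
	}
}
```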

@ -0,0 +1,5 @@
# Features
- Serve `DIR/index.gmi` by default, else falls back to directory listing
- [coming soon...] vhosts!

@ -25,13 +25,13 @@ The features you will miss out on for these files are features listed
# Supported gophermap item types
All of the following item types are supported by Gophor, separated into
All of the following item types are supported by Gophi, separated into
grouped standards. Most handling of item types is performed by the clients
connecting to Gophor, but when performing directory listings Gophor will
connecting to Gophi, but when performing directory listings Gophi will
attempt to automatically classify files according to the below types.
Item types listed as `[SERVER ONLY]` are item types
recognised ONLY by Gophor and to be used when crafting a gophermap. They
recognised ONLY by Gophi and to be used when crafting a gophermap. They
provide additional methods of formatting / functionality within a gophermap,
and the output of these item types is usually converted to informational
text lines before sending to connecting clients.
@ -89,13 +89,30 @@ We aim to comply more with GopherII (see in references below).
Supported item types are listed above.
Informational lines are sent as `i<text here>\t/\tnull.host\t0`.
Informational lines are sent as `i<text here>\tnull.host\t0`.
Titles are sent as `i<title text>\tTITLE\tnull.host\t0`.
Web address links are sent as `h<text here>\tURL:<address>\thostname\tport`.
An HTML redirect is sent in response to any requests beginning with `URL:`.
## Index search type
Index searches are split at the first tab in the request, with the search
content added to the request's query string. Gophi does not handle index
searches directly; instead the search content is made available to CGI scripts
via the `$QUERY_STRING` environment variable. The search content should be
placed AFTER any URL query parameters in the request, and is combined with
those parameters as shown below.
E.g.
`/cgi-bin/search.cgi\tsearch%20query%20here` will set:
`QUERY_STRING='search query here'`
`/cgi-bin/search.cgi?key=value\tsearch query here` will set:
`QUERY_STRING='key=value&search query here'`
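A hypothetical search script (a sketch only, not part of gophi) could then separate any
URL query parameters from the search terms by splitting `$QUERY_STRING` at the last `&`,
assuming the combined form shown above and that the search terms themselves contain no `&`:
```
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	qs := os.Getenv("QUERY_STRING")

	// Anything before the last '&' is treated as the original URL query
	// parameters; the remainder is the index search content. This split
	// is only a heuristic that holds for the examples above.
	params, search := "", qs
	if i := strings.LastIndex(qs, "&"); i != -1 {
		params, search = qs[:i], qs[i+1:]
	}

	fmt.Printf("params=%q search=%q\n", params, search)
}
```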
## Policy files
Upon request, `caps.txt` can be provided from the server root directory
@ -111,7 +128,7 @@ Errors are sent according to GopherII standards, terminating with a last
line:
`3<error text>CR-LF`
Possible Gophor errors:
Possible Gophi errors:
```
Text | Meaning
400 Bad Request | Request not understood by server due to malformed
@ -134,15 +151,13 @@ Possible Gophor errors:
## Terminating full stop
Gophor will send a terminating full-stop for menus, but not for served
Gophi will send a terminating full-stop for menus, but not for served
or executed files.
## Placeholder (null) text
All of the following are used as placeholder text in responses...
Null selector: `-`
Null host: `null.host`
Null port: `0`

@ -4,27 +4,29 @@ By default, URLs are parsed as having standard (HTTP) URL encoding. All other
parsed text content is treated as UTF-8, as this is the default encoding scheme
for Go strings. Support for more encoding schemes is planned for the future.
# Chroots and Privilege Dropping
# Chroots & Privilege Dropping
Previously, chrooting to server directory and dropping privileges was supported
by using Go C bindings. This is not officially supported due to weird behaviour
with `.Set{U,G}id()` under Linux. As such, the feature has been dropped for
now.
Gophi supports entering a chroot and dropping privileges itself for a number
of reasons:
There is a near 10 year ongoing tracked issue
(https://github.com/golang/go/issues/1435), and as soon as this patch gets
merged I'll add support: https://go-review.googlesource.com/c/go/+/210639
- root privileges are required if you want to bind to a port < 1024
In place of removing this, request sanitization has been majorly improved and
checks are in place to prevent running Gophor as root.
- during server startup you can safely access files that require root privileges
e.g. accessing LetsEncrypt generated TLS certs for gemini hosting
If you run into issues binding to a lower port number due to insufficient
permissions then there are a few alternatives:
- you can keep configuration files and certificates required during startup out
of the chroot, which Gophi will load before it enters the chroot
- set gophor process capabilities: e.g.
`setcap 'cap_net_bind_service=+ep' /usr/local/bin/gophor`
Of course you are free to disable chroot (simply set `chroot = ""` in config)
and execute the binary as you wish in a chroot, container, etc.
- use Docker (or some other solution) and configure port forwarding on the
host
When executing CGI scripts from a chroot, you'll need to ensure you symlink
appropriate virtual filesystems and libraries for your scripts / binaries to run.
- start gopher in its own namespace in a chroot
A chroot is generally not compatible with user spaces, unless you bind
mount the user spaces to directories within the chroot i.e.
```
/home/user/public_gopher -> /var/examplechroot/home/user/public_gopher
/home/user/public_gemini -> /var/examplechroot/home/user/public_gemini
```

@ -1,54 +1,12 @@
# Gophor Arguments
# Configuration
```
Usage: %s [-v|--version] [-c|--config $file]
```
gophor [args]
-root Server root directory
-bind-addr IP address to bind to
-port Port to listen on
-fwd-port Outward-facing port
-hostname Server hostname (FQDN)
-sys-log System log output location ['stdout', 'null', $filename]
-acc-log Access log output location ['stdout', 'null', $filename]
-cache-monitor-freq File cache freshness monitor frequency
-cache-size File cache size
-cache-file-max Max cached file size (megabytes)
-restrict-paths Restrict paths as new-line separated list of regex statements
(see documentation below)
-remap-requests Remap requests as new-line separated list of remap statements
(see documentation below)
-cgi-dir CGI scripts directory (empty to disable)
-max-cgi-time Max CGI script execution time
-safe-path CGI environment safe PATH variable
-http-compat-cgi Enable HTTP compatibility for CGI scripts by stripping headers
-http-prefix-buf Buffer size used for stripping HTTP headers
-user-dir User subdir for personal server space
-read-deadline Connection read deadline (timeout)
-write-deadline Connection write deadline (timeout)
-conn-read-buf Connection read buffer size (bytes)
-conn-write-buf Connection write buffer size (bytes)
-conn-read-max Connection read max (bytes)
-file-read-buf File read buffer size (bytes)
-version Print version string
# Gopher Specific Arguments
-page-width Gopher page width
-footer-text Footer text (empty to disable)
-subgopher-size-max Subgophermap size max (megabytes)
If no configuration file is provided, `/etc/gophi.${proto}.conf` is the default path,
where `${proto}` is one of `gemini` or `gopher` depending on which version you compiled.
-admin Generated policy file admin email
-description Generated policy file server description
-geolocation Generated policy file server geolocation
```
For configuration information, see: [Example Configuration](docs/example.toml)
# Path Restrictions Regex

@ -0,0 +1,128 @@
# Chroot directory (empty to disable)
chroot = "/var/gophi"
# Server root. If chroot enabled
# this will be seen as relative to
# the server chroot
root = "/"
# UNIX user and group names server
# should run under:
# - if user supplied but no group, then
# user's primary group is used
# - if both are blank, runs as current
# user
user = "grufwub"
group = ""
# Server bind address
listen = "127.0.0.1"
# Server hostname
hostname = "localhost"
# Port to listen on
port = 1024
# Enable user server spaces, e.g.
# ~/public_{gopher,gemini}
user-spaces = false
[connection]
# Connection read timeout
read-timeout = "5s"
# Connection write timeout
write-timeout = "15s"
# Connection write buffer size (in bytes)
write-buf = 1024
# Connection read max (in bytes), with
# max read sizes this low we don't bother
# buffering reads
read-max = 1024
[filesystem]
# File read buffer size (in bytes)
read-buf = 1024
[filesystem.cache]
# Filesystem monitor check freq.
monitor-freq = "60s"
# Maximum cached file size (in MB)
file-max = 1.0
# Maximum file age before being marked
# as stale, i.e. safe to be
# removed on next monitor sweep
age-max = "5m"
# Cache size count
size = 100
[requests]
# NOTE: please use apostrophe declared
# strings (i.e. ' not ") otherwise
# backslashes will need to be escaped
# String array of filesystem path
# regex statements to restrict.
restrict = [
'/(.+/)?\.[a-zA-Z0-9-_.]+',
]
# String array of filesystem path
# regex statements to hide from dir
# listings
hidden = [
'',
]
# String array of request remapping
# regex statements
remap = [
'',
]
[log]
# Log output locations, options:
# - stdout -> /dev/stdout
# - stderr -> /dev/stderr
# - null -> /dev/null
# - $file -> $file
system = "stdout"
access = "stdout"
[cgi]
# Relative CGI scripts directory
# path within server root
directory = "cgi-bin"
# CGI environment $PATH
safe-path = "/bin:/usr/bin"
# Gopher specific configuration, uncomment
# if you have built gophi as a gopher server
#[gopher]
# # Page width before line truncation
# page-width = 80
#
# # Footer text included below gophermaps
# footer = ""
#
# # Subgophermap size max (in megabytes)
# subgopher-max = 1.0
#
# # Information included in caps.txt
# # policy file
# admin-email = ""
# description = ""
# geolocation = ""
# Gemini specific configuration, uncomment
# if you have built gophi as a gemini server
#[gemini]
# tls-cert = ""
# tls-key = ""

@ -0,0 +1,68 @@
package gemini
import (
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/hex"
"gophi/core"
)
// Mapped to tls.Version__ where:
// value - 0x300 = index.
// Deprecated versions will always be
// prefixed by 'DEPRECATED'
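// e.g. tls.VersionTLS12 (0x0303) - 0x0300 = 3 -> "TLSv1.2"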
var tlsVersionStrings = []string{
"DEPRECATED:SSLv3",
"TLSv1.0",
"TLSv1.1",
"TLSv1.2",
"TLSv1.3",
}
func certSha256Hash(cert *x509.Certificate) string {
checksum := sha256.Sum256(cert.Raw)
return hex.EncodeToString(checksum[:])
}
func appendCgiEnv(client *core.Client, request *core.Request, env []string) []string {
// Build and append the full gemini url
env = append(env, "GEMINI_URL=gemini://"+core.Hostname+":"+core.Port+request.Path().Selector())
// Cast client underlying net.Conn as tls.Conn
tlsConn := client.Conn().Conn().(*tls.Conn)
state := tlsConn.ConnectionState()
// Append TLS env vars
env = append(env, "TLS_CIPHER="+tls.CipherSuiteName(state.CipherSuite))
env = append(env, "TLS_VERSION="+tlsVersionStrings[state.Version-0x300])
// Append TLS client cert vars (if present!)
clientCerts := state.PeerCertificates
if len(clientCerts) > 0 {
// Only use first if multiple client
// certs available
cert := clientCerts[0]
// Verify client cert
var isVerified string
_, err := cert.Verify(x509.VerifyOptions{})
if err != nil {
isVerified = "FAILED:" + err.Error()
} else {
isVerified = "SUCCESS"
}
// Set user cert environment vars
env = append(env, "AUTH_TYPE=Certificate")
env = append(env, "REMOTE_USER="+cert.Subject.CommonName)
env = append(env, "TLS_CLIENT_HASH=SHA256:"+certSha256Hash(cert))
env = append(env, "TLS_CLIENT_NOT_BEFORE="+cert.NotBefore.String())
env = append(env, "TLS_CLIENT_NOT_AFTER="+cert.NotAfter.String())
env = append(env, "TLS_CLIENT_ISSUER="+cert.Issuer.String())
env = append(env, "TLS_CLIENT_SUBJECT="+cert.Subject.String())
env = append(env, "TLS_CLIENT_VERIFIED="+isVerified)
}
return env
}

@ -0,0 +1,95 @@
package gemini
import (
"gophi/core"
"github.com/grufwub/go-errors"
)
// Gemini specific base errors
var (
errInvalidScheme = errors.BaseError("invalid request scheme")
errProxyRequest = errors.BaseError("host:port pair differ from our own")
)
// Gemini status codes
var (
statusInput = "10"
statusSensitive = "11"
statusTemporaryRedirect = "30"
statusPermanentRedirect = "31"
statusTemporaryFailure = "40"
statusServerUnavailable = "41"
statusCGIError = "42"
statusProxyError = "43"
statusSlowDown = "44"
statusPermanentFailure = "50"
statusNotFound = "51"
statusGone = "52"
statusProxyRequestRefused = "53"
statusBadRequest = "59"
statusClientCertificateRequired = "60"
statusClientCertificateNotAuthorized = "61"
statusCertificateNotValid = "62"
)
// Gemini error responses
var (
// more specific responses
errConnReadRsp = buildResponseHeader(statusTemporaryFailure, "Read Failure")
errRestrictedRsp = buildResponseHeader(statusNotFound, "Restricted Path")
errInvalidSchemeRsp = buildResponseHeader(statusProxyRequestRefused, "Unsupported Scheme")
errProxyRequestRsp = buildResponseHeader(statusProxyRequestRefused, "Proxying Unsupported")
// generic responses
errNotFoundRsp = buildResponseHeader(statusNotFound, "Not Found")
errTemporaryFailureRsp = buildResponseHeader(statusTemporaryFailure, "Temporary Failure")
errPermanentFailureRsp = buildResponseHeader(statusPermanentFailure, "Permanent Failure")
errBadRequestRsp = buildResponseHeader(statusBadRequest, "Bad Request")
)
// generateErrorResponse takes an error code and generates an error response byte slice
func generateErrorResponse(err error) ([]byte, bool) {
switch {
case errors.Is(err, core.ErrConnWrite):
return nil, false // no point responding if we couldn't write
case errors.Is(err, core.ErrConnRead):
return errConnReadRsp, true
case errors.Is(err, core.ErrConnClose):
return nil, false // no point responding if we couldn't close
case errors.Is(err, core.ErrMutexUpgrade):
return errTemporaryFailureRsp, true
case errors.Is(err, core.ErrMutexDowngrade):
return errTemporaryFailureRsp, true
case errors.Is(err, core.ErrFileOpen):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileStat):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileRead):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileType):
return errNotFoundRsp, true
case errors.Is(err, core.ErrDirectoryRead):
return errNotFoundRsp, true
case errors.Is(err, core.ErrRestrictedPath):
return errRestrictedRsp, true
case errors.Is(err, core.ErrInvalidRequest):
return errBadRequestRsp, true
case errors.Is(err, core.ErrParsingScheme):
return errBadRequestRsp, true
case errors.Is(err, core.ErrParsingHost):
return errBadRequestRsp, true
case errors.Is(err, core.ErrParsingURI):
return errBadRequestRsp, true
case errors.Is(err, core.ErrCGIStart):
return errPermanentFailureRsp, true
case errors.Is(err, core.ErrCGIExitCode):
return errTemporaryFailureRsp, true
case errors.Is(err, errInvalidScheme):
return errInvalidSchemeRsp, true
case errors.Is(err, errProxyRequest):
return errProxyRequestRsp, true
default:
return nil, false
}
}

@ -0,0 +1,36 @@
package gemini
import (
"gophi/core"
"os"
)
type headerPlusFileContent struct {
contents []byte
}
// WriteToClient writes the current contents of FileContents to the client
func (fc *headerPlusFileContent) WriteToClient(client *core.Client, p *core.Path) error {
return client.Conn().Write(fc.contents)
}
// Load takes an open FD and loads the file contents into FileContents memory
func (fc *headerPlusFileContent) Load(p *core.Path, file *os.File) error {
// Read the file contents
contents, err := core.ReadFile(file)
if err != nil {
return err
}
// Set success header + mime type response header
header := buildResponseHeader("20", getFileStatusMeta(p))
// Set the store contents and return ok
fc.contents = append(header, contents...)
return nil
}
// Clear empties currently cached FileContents memory
func (fc *headerPlusFileContent) Clear() {
fc.contents = nil
}

@ -0,0 +1,34 @@
package gemini
import (
"gophi/core"
)
const gemMimeType = "text/gemini"
func getFileStatusMeta(p *core.Path) string {
// if this is a gem, return this
if isGem(p) {
return gemMimeType
}
// Get file extension
ext := core.FileExt(p.Relative())
// Try get the mime type, or use default unknown (application octet-stream)
mimeType, ok := mimeTypes[ext]
if !ok {
mimeType = "application/octet-stream"
}
// Calculate mime type for extension
return mimeType
}
func buildRedirect(to string) []byte {
return buildResponseHeader("31", to)
}
func buildResponseHeader(statusCode, statusMeta string) []byte {
return []byte(statusCode + " " + statusMeta + "\r\n")
}

@ -0,0 +1,68 @@
package gemini
import (
"crypto/rand"
"crypto/tls"
"gophi/core"
"io"
"github.com/grufwub/go-config"
"github.com/grufwub/go-logger"
)
func init() {
// As part of init perform initial entropy assertion
b := make([]byte, 1)
_, err := io.ReadFull(rand.Reader, b)
if err != nil {
logger.Fatal("Failed to assert safe source of system entropy exists!")
}
}
// Run does as says :)
func Run() {
// Create new TOML config parser
tree := make(config.Tree)
// Parse gemini specific flags, then all
certFile := tree.String("gemini.tls-cert", "")
keyFile := tree.String("gemini.tls-key", "")
core.ParseConfigAndSetup(
tree,
"gemini",
1965,
func() (*core.Listener, error) {
// Load the supplied key pair
cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
return nil, err
}
// Create TLS config
config := &tls.Config{
Certificates: []tls.Certificate{cert},
}
config.Rand = rand.Reader
// Create listener!
l, err := tls.Listen("tcp", core.Bind+":"+core.Port, config)
if err != nil {
return nil, err
}
// Return wrapper listener
return core.NewListener(l), nil
},
newFileContent,
handleDirectory,
handleLargeFile,
appendCgiEnv,
)
// Generate the root redirect byte slice
// (has to be done here once the Hostname and Port have been set)
rootRedirect = buildRedirect("gemini://" + core.Hostname + ":" + core.Port + "/")
// Start!
core.Start(serve)
}

File diff suppressed because it is too large

@ -0,0 +1,16 @@
package gemini
import (
"gophi/core"
"regexp"
)
var (
// gemRegex is the precompiled gemini file name regex check
gemRegex = regexp.MustCompile(`^(|.+/|.+\.)gmi$`)
)
// isGem checks against gemini regex as to whether a file path is a gemini file
func isGem(path *core.Path) bool {
return gemRegex.MatchString(path.Relative())
}

@ -0,0 +1,175 @@
package gemini
import (
"gophi/core"
"os"
"strings"
)
// rootRedirect stores the root redirect header byte slice,
// so there is no need to recalculate it every time it's needed
var rootRedirect []byte
// serve is the global gemini server's serve function
func serve(client *core.Client) {
// Receive line from client
received, err := client.Conn().ReadLine()
if err != nil {
client.LogError("Conn read fail")
handleError(client, err)
return
}
raw := string(received)
// Ensure is a valid URL string
if core.HasAsciiControlBytes(raw) {
client.LogError("Invalid request: %s", raw)
handleError(client, core.ErrInvalidRequest.Extendf("%s has ascii control bytes", raw))
return
}
// Get the URL scheme (or error!)
scheme, path, err := core.ParseScheme(raw)
if err != nil {
client.LogError("Invalid request: %s", raw)
handleError(client, err)
return
}
// Infer no scheme as 'gemini', else check we
// were explicitly provided 'gemini'
if scheme != "" && scheme != "gemini" {
client.LogError("Invalid request: %s", raw)
handleError(client, errInvalidScheme.Extend(scheme))
return
}
// Split by first '/' (with prefix '//' trimmed) to get host info and path strings
host, path := core.SplitByBefore(strings.TrimPrefix(path, "//"), "/")
// Parse the URL encoded host info
host, port, err := core.ParseEncodedHost(host)
if err != nil {
client.LogError("Invalid request: %s", raw)
handleError(client, err)
return
}
// Check the host and port are our own (empty port is allowed)
if host != core.Hostname || (port != "" && port != core.Port) {
client.LogError("Invalid request: %s", raw)
handleError(client, errProxyRequest.Extend(host+":"+port))
return
}
// Parse the encoded URI into path and query components
path, query, err := core.ParseEncodedURI(path)
if err != nil {
client.LogError("Invalid request: %s", raw)
handleError(client, err)
return
}
// Redirect empty path to root
if len(path) < 1 {
client.LogInfo("Redirect to: /")
client.Conn().Write(rootRedirect)
return
}
// Build new Request from raw path and query
request := core.NewRequest(core.BuildPath(path), query)
// Handle the request! And finally, error
err = core.HandleClient(client, request)
if err != nil {
handleError(client, err)
client.LogError("Failed to serve: %s", request.String())
} else {
client.LogInfo("Served: %s", request.String())
}
}
// handleError determines whether to send an error response to the client, and logs to system
func handleError(client *core.Client, err error) {
response, ok := generateErrorResponse(err)
if ok {
client.Conn().Write(response)
}
core.SystemLog.Error(err.Error())
}
func handleDirectory(client *core.Client, file *os.File, p *core.Path) error {
// First check for index gem, create gem Path object
indexGem := p.JoinPathUnsafe("index.gmi")
// If index gem exists, we fetch this
file2, err := core.OpenFile(indexGem)
if err == nil {
stat, err := file2.Stat()
if err == nil {
// Fetch gem and defer close
defer file2.Close()
return core.FetchFile(client, file2, stat, indexGem)
}
// Else, just close file2
file2.Close()
}
// Slice to write
dirContents := make([]byte, 0)
// Escape the parent directory selector (for the '..' back link)
dirSel := core.EscapePath(p.SelectorDir())
// Add directory heading, empty line and a back line
dirContents = append(dirContents, []byte("["+core.Hostname+p.Selector()+"]\n\n")...)
dirContents = append(dirContents, []byte("=> "+dirSel+" ..\n")...)
// Scan directory and build lines
err = core.ScanDirectory(
file,
p,
func(file os.FileInfo, fp *core.Path) {
// Calculate escaped selector path
sel := core.EscapePath(fp.Selector())
// If it's a dir, append final '/' to selector
if file.IsDir() {
sel += "/"
}
// Append new formatted file listing
dirContents = append(dirContents, []byte("=> "+sel+" "+file.Name()+"\n")...)
},
)
if err != nil {
return err
}
// Generate gem file header
header := buildResponseHeader("20", gemMimeType)
// Write contents!
return client.Conn().Write(append(header, dirContents...))
}
func handleLargeFile(client *core.Client, file *os.File, p *core.Path) error {
// Build the response header
header := buildResponseHeader("20", getFileStatusMeta(p))
// Write the initial header (or return!)
err := client.Conn().Write(header)
if err != nil {
return err
}
// Finally write directly from file
return client.Conn().ReadFrom(file)
}
// newFileContent returns a new FileContent object
func newFileContent(p *core.Path) core.FileContent {
return &headerPlusFileContent{}
}

@ -0,0 +1,12 @@
#!/bin/sh
[ $# -ne 1 ] && {
echo "Usage: ${0} <hostname>"
exit 1
}
echo "Generating TLS cert + privkey for: ${1}"
openssl req -x509 -newkey rsa:4096 -sha256 \
-days 365 -nodes -keyout "${1}.key" \
-out "${1}.crt" -subj "/CN=${1}" \
-addext "subjectAltName=DNS:${1}"
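Example usage (assuming the script is saved as gen-cert.sh, a name not given in the diff): running ./gen-cert.sh gemini.example.org writes gemini.example.org.key and gemini.example.org.crt, a self-signed pair valid for 365 days, which the gemini.tls-key and gemini.tls-cert config values can then point at.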

@ -0,0 +1,44 @@
#!/bin/sh
# Mime types JSON source
URL='https://raw.githubusercontent.com/micnic/mime.json/master/index.json'
# Define intro to file
FILE='
// This is an automatically generated file, do not edit
package gemini
var mimeTypes = map[string]string{
// Mimetype for empty file extensions
"": "application/octet-stream",
// Begin file extension definitions
'
# Set break on new-line
IFS='
'
for line in $(curl -fL "$URL" | grep -E '".+"\s*:\s*".+"'); do
# Trim final whitespace
line=$(echo "$line" | sed -e 's|\s*$||')
# Ensure it ends in a comma
[ "${line%,}" = "$line" ] && line="${line},"
# Add to file
FILE="${FILE}${line}
"
done
# Add final statement to file
FILE="${FILE}
}
"
# Write to file
echo "$FILE" > 'gemini/mime.go'
# Check for valid go
goimports -w 'gemini/mime.go'
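The generated gemini/mime.go then looks roughly like this (the extension entries below are illustrative; the real keys come straight from mime.json):

// This is an automatically generated file, do not edit
package gemini

var mimeTypes = map[string]string{
	// Mimetype for empty file extensions
	"": "application/octet-stream",
	// Begin file extension definitions
	"gif":  "image/gif",
	"json": "application/json",
	"txt":  "text/plain",
}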

@ -1,3 +1,11 @@
module gophi
go 1.14
go 1.15
require (
github.com/grufwub/go-bufpools v0.1.1
github.com/grufwub/go-config v0.1.0
github.com/grufwub/go-errors v0.3.2
github.com/grufwub/go-filecache v0.1.0
github.com/grufwub/go-logger v0.1.1
)

@ -0,0 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/grufwub/go-bufpools v0.1.1 h1:TOUKNY+UaQ784EtvP+wKoXssYQQRX5zewtrZ7u4C81M=
github.com/grufwub/go-bufpools v0.1.1/go.mod h1:ITqLRtG+W1bZHGdkWewV7inb+GcWfq2Jcjqx4AZ7aBY=
github.com/grufwub/go-config v0.1.0 h1:/UDEmprs4h4qEkgmQqthmtGZeJs8eB44qMVSqa+5sxU=
github.com/grufwub/go-config v0.1.0/go.mod h1:0U5Y0EkNeL09YkY70fNZv4Kelfayp/VroEs2UzmUG04=
github.com/grufwub/go-errors v0.3.2 h1:IB17KWLB+NNXCb+YUPMxPpvlNXGpxt0Xpad7wPxxRoo=
github.com/grufwub/go-errors v0.3.2/go.mod h1:AXGtU2fWv8ejaUUT0+9wTOlWqcxYDo8wuYnhrYtoBKM=
github.com/grufwub/go-filecache v0.1.0 h1:OugzIHzLco8LLRnAlD7m6zSFQTILjltNO8Hhr/8vcCo=
github.com/grufwub/go-filecache v0.1.0/go.mod h1:iAfqEfsC5YsyGD+f8JducuWeRqCDBVPi1+VmCaPL07Q=
github.com/grufwub/go-logger v0.1.1 h1:KnD6NNyeq3cz6dZKW/Gr+Fz9dNvkLf8KvYZXKGM5cN0=
github.com/grufwub/go-logger v0.1.1/go.mod h1:pZny1PMTpy9FAKMbaDYbPJbthl0wrSpVoIcnEjkRZaQ=
github.com/grufwub/go-upmutex v0.1.0 h1:ePACrB9VwGjBDqYfdJB8tDnnq3PNn05I4DhR0p1NT6M=
github.com/grufwub/go-upmutex v0.1.0/go.mod h1:Eb/BM4cKjBdmbwJ0XJ4GxIeSFCBOIWzIUx7RN/VKHNs=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=

@ -0,0 +1,11 @@
package gopher
import (
"gophi/core"
"strconv"
)
func appendCgiEnv(client *core.Client, request *core.Request, env []string) []string {
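// Expose the configured gopher page width through the conventional COLUMNS variable,
// so CGI scripts that honour it can wrap their output to fit (how scripts use it is up to them)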
env = append(env, "COLUMNS="+strconv.Itoa(pageWidth))
return env
}

@ -1,95 +1,83 @@
package gopher
import "gophi/core"
import (
"gophi/core"
// Gopher specific error codes
"github.com/grufwub/go-errors"
)
// Gopher specific errors
var (
errInvalidGophermap = errors.BaseError("invalid gophermap")
errSubgophermapIsDir = errors.BaseError("subgophermap path is dir")
errSubgophermapSize = errors.BaseError("subgophermap size too large")
)
// Gopher response error text
const (
InvalidGophermapErr core.ErrorCode = 1
SubgophermapIsDirErr core.ErrorCode = 2
SubgophermapSizeErr core.ErrorCode = 3
statusBadRequest = "400 Bad Request"
statusUnauthorized = "401 Unauthorised"
statusForbidden = "403 Forbidden"
statusNotFound = "404 Not Found"
statusRequestTimeout = "408 Request Time-out"
statusGone = "410 Gone"
statusInternalServerError = "500 Internal Server Error"
statusNotImplemented = "501 Not Implemented"
statusServiceUnavailable = "503 Service Unavailable"
)
// generateErrorMessage returns a message for any gopher specific error codes
func generateErrorMessage(code core.ErrorCode) string {
switch code {
case InvalidGophermapErr:
return invalidGophermapErrStr
case SubgophermapIsDirErr:
return subgophermapIsDirErrStr
case SubgophermapSizeErr:
return subgophermapSizeErrStr
default:
return unknownErrStr
}
}
// Gopher error responses
var (
errBadRequestRsp = buildErrorLine(statusBadRequest)
errUnauthorizedRsp = buildErrorLine(statusUnauthorized)
errForbiddenRsp = buildErrorLine(statusForbidden)
errNotFoundRsp = buildErrorLine(statusNotFound)
errRequestTimeoutRsp = buildErrorLine(statusRequestTimeout)
errGoneRsp = buildErrorLine(statusGone)
errInternalServerErrorRsp = buildErrorLine(statusInternalServerError)
errNotImplementedRsp = buildErrorLine(statusNotImplemented)
errServiceUnavailableRsp = buildErrorLine(statusServiceUnavailable)
)
// generateErrorResponse takes an error and generates an error response byte slice
func generateErrorResponse(code core.ErrorCode) ([]byte, bool) {
switch code {
case core.ConnWriteErr:
func generateErrorResponse(err error) ([]byte, bool) {
switch {
case errors.Is(err, core.ErrConnWrite):
return nil, false // no point responding if we couldn't write
case core.ConnReadErr:
return buildErrorLine(errorResponse503), true
case core.ConnCloseErr:
case errors.Is(err, core.ErrConnRead):
return errServiceUnavailableRsp, true
case errors.Is(err, core.ErrConnClose):
return nil, false // no point responding if we couldn't close
case core.ListenerResolveErr:
return nil, false // not user facing
case core.ListenerBeginErr:
return nil, false // not user facing
case core.ListenerAcceptErr:
return nil, false // not user facing
case core.InvalidIPErr:
return nil, false // not user facing
case core.InvalidPortErr:
return nil, false // not user facing
case core.MutexUpgradeErr:
return buildErrorLine(errorResponse500), true
case core.MutexDowngradeErr:
return buildErrorLine(errorResponse500), true
case core.FileOpenErr:
return buildErrorLine(errorResponse404), true
case core.FileStatErr:
return buildErrorLine(errorResponse500), true
case core.FileReadErr:
return buildErrorLine(errorResponse500), true
case core.FileTypeErr:
return buildErrorLine(errorResponse404), true
case core.DirectoryReadErr:
return buildErrorLine(errorResponse500), true
case core.RestrictedPathErr:
return buildErrorLine(errorResponse403), true
case core.InvalidRequestErr:
return buildErrorLine(errorResponse400), true
case core.CGIStartErr:
return buildErrorLine(errorResponse500), true
case core.CGIExitCodeErr:
return buildErrorLine(errorResponse500), true
case core.CGIStatus400Err:
return buildErrorLine(errorResponse400), true
case core.CGIStatus401Err:
return buildErrorLine(errorResponse401), true
case core.CGIStatus403Err:
return buildErrorLine(errorResponse403), true
case core.CGIStatus404Err:
return buildErrorLine(errorResponse404), true
case core.CGIStatus408Err:
return buildErrorLine(errorResponse408), true
case core.CGIStatus410Err:
return buildErrorLine(errorResponse410), true
case core.CGIStatus500Err:
return buildErrorLine(errorResponse500), true
case core.CGIStatus501Err:
return buildErrorLine(errorResponse501), true
case core.CGIStatus503Err:
return buildErrorLine(errorResponse503), true
case core.CGIStatusUnknownErr:
return buildErrorLine(errorResponse500), true
case InvalidGophermapErr:
return buildErrorLine(errorResponse500), true
case SubgophermapIsDirErr:
return buildErrorLine(errorResponse500), true
case SubgophermapSizeErr:
return buildErrorLine(errorResponse500), true
case errors.Is(err, core.ErrMutexUpgrade):
return errServiceUnavailableRsp, true
case errors.Is(err, core.ErrMutexDowngrade):
return errServiceUnavailableRsp, true
case errors.Is(err, core.ErrFileOpen):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileStat):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileRead):
return errNotFoundRsp, true
case errors.Is(err, core.ErrFileType):
return errNotFoundRsp, true
case errors.Is(err, core.ErrDirectoryRead):
return errNotFoundRsp, true
case errors.Is(err, core.ErrRestrictedPath):
return errForbiddenRsp, true
case errors.Is(err, core.ErrInvalidRequest):
return errBadRequestRsp, true
case errors.Is(err, core.ErrParsingScheme):
return errBadRequestRsp, true
case errors.Is(err, core.ErrCGIStart):
return errInternalServerErrorRsp, true
case errors.Is(err, core.ErrCGIExitCode):
return errServiceUnavailableRsp, true
case errors.Is(err, errInvalidGophermap):
return errInternalServerErrorRsp, true
case errors.Is(err, errSubgophermapIsDir):
return errInternalServerErrorRsp, true
case errors.Is(err, errSubgophermapSize):
return errInternalServerErrorRsp, true
default:
return nil, false
}
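The dispatch above relies on sentinel matching surviving wrapping. A standalone sketch of the same pattern using the standard library's errors package (the project itself uses github.com/grufwub/go-errors, assumed here to behave the same way; the sentinel and response values are stand-ins):

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the core sentinel and the prebuilt gopher error line.
var (
	errFileOpen    = errors.New("file open fail")
	errNotFoundRsp = []byte("3404 Not Found\r\n.\r\n")
)

func generateErrorResponse(err error) ([]byte, bool) {
	switch {
	case errors.Is(err, errFileOpen):
		return errNotFoundRsp, true
	default:
		return nil, false // unrecognised: don't write a response
	}
}

func main() {
	wrapped := fmt.Errorf("open /docs: %w", errFileOpen)
	rsp, ok := generateErrorResponse(wrapped)
	fmt.Println(ok, string(rsp)) // true 3404 Not Found ...
}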

@ -0,0 +1,53 @@
package gopher
import (
"gophi/core"
"os"
)
// generatedFileContent is a simple core.FileContent implementation that holds generated (virtual) file contents
type generatedFileContent struct {
content []byte
}
// Load does nothing for generated content
func (fc *generatedFileContent) Load(p *core.Path, file *os.File) error { return nil }
// WriteToClient writes the generated content to the client
func (fc *generatedFileContent) WriteToClient(client *core.Client, p *core.Path) error {
return client.Conn().Write(fc.content)
}
// Clear does nothing
func (fc *generatedFileContent) Clear() {}
// gophermapContent is an implementation of core.FileContent that holds individually renderable sections of a gophermap
type gophermapContent struct {
sections []gophermapSection
}
// Load takes an open FD and loads the gophermap contents into memory as different renderable sections
func (gc *gophermapContent) Load(path *core.Path, file *os.File) error {
var err error
gc.sections, err = readGophermap(file, path)
return err
}
// WriteToClient renders each cached section of the gophermap, and writes them to the client
func (gc *gophermapContent) WriteToClient(client *core.Client, path *core.Path) error {
// Render + write the sections!
for _, section := range gc.sections {
err := section.RenderAndWrite(client)
if err != nil {
return err
}
}
// Finally, write the footer (including last-line)
return client.Conn().Write(footer)
}
// Clear empties the currently cached gophermap sections
func (gc *gophermapContent) Clear() {
gc.sections = nil
}
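Both implementations above fit the same shape: load from a file, write to a client, clear. A standalone sketch of that pattern with simplified stand-in names (not gophi's actual core.FileContent definition):

package main

import (
	"fmt"
	"os"
)

// fileContent mirrors the load/write/clear shape the types above implement.
type fileContent interface {
	Load(file *os.File) error
	WriteTo(out *os.File) error
	Clear()
}

// generatedContent holds a virtual file's bytes, like generatedFileContent above.
type generatedContent struct{ content []byte }

func (g *generatedContent) Load(*os.File) error { return nil } // nothing to load
func (g *generatedContent) WriteTo(out *os.File) error {
	_, err := out.Write(g.content)
	return err
}
func (g *generatedContent) Clear() { g.content = nil }

func main() {
	var fc fileContent = &generatedContent{content: []byte("CAPS\r\nVERSION=1\r\n")}
	if err := fc.WriteTo(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}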

@ -1,37 +0,0 @@
package gopher
import (
"gophi/core"
"os"
)
// gophermapContents is an implementation of core.FileContents that holds individually renderable sections of a gophermap
type gophermapContents struct {
sections []gophermapSection
}
// WriteToClient renders each cached section of the gophermap, and writes them to the client
func (gc *gophermapContents) WriteToClient(client *core.Client, path *core.Path) core.Error {
// Render + write the sections!
for _, section := range gc.sections {
err := section.RenderAndWrite(client)
if err != nil {
return err
}
}
// Finally, write the footer (including last-line)
return client.Conn().Write(footer)
}
// Load takes an open FD and loads the gophermap contents into memory as different renderable sections
func (gc *gophermapContents) Load(fd *os.File, path *core.Path) core.Error {
var err core.Error
gc.sections, err = readGophermap(fd, path)
return err
}
// Clear empties currently cached GophermapContents memory
func (gc *gophermapContents) Clear() {
gc.sections = nil
}

@ -14,38 +14,25 @@ const (
errorSelector = "/error_selector_length"
)
var (
// pageWidth is the maximum set page width of a gophermap document to render to
pageWidth int
// footer holds the formatted footer text (if supplied), and gophermap last-line
footer []byte
)
// formatName formats a gopher line name string
func formatName(name string) string {
if len(name) > pageWidth {
return name[:pageWidth-4] + "...\t"
return name[:pageWidth-4] + "..."
}
return name + "\t"
return name
}
// formatSelector formats a gopher line selector string
func formatSelector(selector string) string {
if len(selector) > maxSelectorLen {
return errorSelector + "\t"
return errorSelector
}
return selector + "\t"
}
// formatHostPort formats a gopher line host + port
func formatHostPort(host, port string) string {
return host + "\t" + port
return core.EscapePath(selector)
}
// replacePlacementStrs replaces any placement strings found in the line (e.g $hostname, $port)
func replacePlacementStrs(line string) string {
split := strings.Split(line, "\t")
split := strings.SplitN(line, "\t", 4)
// Either invalid line, or information line without host + port
if len(split) != 4 {
@ -56,32 +43,33 @@ func replacePlacementStrs(line string) string {
return split[0] + "\t" +
split[1] + "\t" +
strings.Replace(split[2], "$host", core.Hostname, 1) + "\t" +
strings.Replace(split[3], "$port", core.FwdPort, 1)
strings.Replace(split[3], "$port", core.Port, 1)
}
// buildLine builds a gopher line string
func buildLine(t ItemType, name, selector, host, port string) []byte {
return []byte(string(t) + formatName(name) + formatSelector(selector) + formatHostPort(host, port) + "\r\n")
return []byte(string(t) + formatName(name) + "\t" + formatSelector(selector) + "\t" + host + "\t" + port + "\r\n")
}
// buildInfoLine builds a gopher info line string
func buildInfoLine(line string) []byte {
return []byte(string(typeInfo) + formatName(line) + formatHostPort(nullHost, nullPort) + "\r\n")
return []byte(string(typeInfo) + formatName(line) + "\t\t" + nullHost + "\t" + nullPort + "\r\n")
}
// buildErrorLine builds a gopher error line string
func buildErrorLine(selector string) []byte {
return []byte(string(typeError) + selector + "\r\n" + ".\r\n")
func buildErrorLine(text string) []byte {
return []byte(string(typeError) + text + "\r\n" + ".\r\n")
}
// appendFileListing formats and appends a new file entry as part of a directory listing
func appendFileListing(b []byte, file os.FileInfo, p *core.Path) []byte {
// Handle file type
switch {
case file.Mode()&os.ModeDir != 0:
return append(b, buildLine(typeDirectory, file.Name(), p.Selector(), core.Hostname, core.FwdPort)...)
case file.Mode()&os.ModeType == 0:
case file.Mode().IsDir():
return append(b, buildLine(typeDirectory, file.Name(), p.Selector()+"/", core.Hostname, core.Port)...)
case file.Mode().IsRegular():
t := getItemType(file.Name())
return append(b, buildLine(t, file.Name(), p.Selector(), core.Hostname, core.FwdPort)...)
return append(b, buildLine(t, file.Name(), p.Selector(), core.Hostname, core.Port)...)
default:
return b
}
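For reference, the menu lines these helpers produce look like the following. A standalone reproduction of the format (conventional gopher item type characters; the null host and port values shown are assumptions, the real constants are defined elsewhere in the package):

package main

import "fmt"

func main() {
	// Directory entry as built by buildLine: item type char, name, selector, host, port, CRLF
	fmt.Printf("%q\n", "1"+"docs"+"\t"+"/docs/"+"\t"+"example.org"+"\t"+"70"+"\r\n")
	// Info line as built by buildInfoLine: type 'i', text, empty selector, null host and port
	fmt.Printf("%q\n", "i"+"hello world"+"\t\t"+"(NULL)"+"\t"+"0"+"\r\n")
	// Error line as built by buildErrorLine: type '3', text, then the terminating last-line
	fmt.Printf("%q\n", "3"+"404 Not Found"+"\r\n"+".\r\n")
}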

@ -3,20 +3,17 @@ package gopher
import (
"gophi/core"
"os"
)
var (
// subgophermapSizeMax specifies the maximum size of an included subgophermap
subgophermapSizeMax int64
"github.com/grufwub/go-errors"
)
// gophermapSection is an interface that specifies individually renderable (and writeable) sections of a gophermap
type gophermapSection interface {
RenderAndWrite(*core.Client) core.Error
RenderAndWrite(*core.Client) error
}
// readGophermap reads a FD and Path as gophermap sections
func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
func readGophermap(file *os.File, p *core.Path) ([]gophermapSection, error) {
// Create return slice
sections := make([]gophermapSection, 0)
@ -26,12 +23,12 @@ func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
}
// Declare variables
var returnErr core.Error
var returnErr error
titleAlready := false
// Perform scan of gophermap FD
scanErr := core.FileSystem.ScanFile(
fd,
scanErr := core.ScanFile(
file,
func(line string) bool {
// Parse the line item type and handle
lineType := parseLineType(line)
@ -48,7 +45,7 @@ func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
titleAlready = true
return true
}
returnErr = core.NewError(InvalidGophermapErr)
returnErr = errInvalidGophermap.Extendf("%s multiple title declarations", p.Absolute())
return false
case typeComment:
@ -61,29 +58,34 @@ func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
return true
case typeSubGophermap:
// Parse new Path and parameters
request := core.ParseInternalRequest(p, line[1:])
// Parse encoded URI
var path, query string
path, query, returnErr = core.ParseEncodedURI(line[1:])
if returnErr != nil {
return false
} else if request.Path().Relative() == "" || request.Path().Relative() == p.Relative() {
returnErr = core.NewError(InvalidGophermapErr)
}
// Build new request. If empty relative path, or relative
// equal to current gophermap (recurse!!!) we return error
request := core.NewRequest(core.BuildPath(path), query)
if request.Path().Relative() == "" || request.Path().Relative() == p.Relative() {
returnErr = errInvalidGophermap.Extendf("%s invalid subgophermap '%s'", p.Absolute(), request.Path().Absolute())
return false
}
// Open FD
var subFD *os.File
subFD, returnErr = core.FileSystem.OpenFile(request.Path())
// Open sub gophermap
var subFile *os.File
subFile, returnErr = core.OpenFile(request.Path())
if returnErr != nil {
return false
}
// Get stat
stat, err := subFD.Stat()
stat, err := subFile.Stat()
if err != nil {
returnErr = core.WrapError(core.FileStatErr, err)
returnErr = errors.With(err).WrapWithin(core.ErrFileStat)
return false
} else if stat.IsDir() {
returnErr = core.NewError(SubgophermapIsDirErr)
returnErr = errSubgophermapIsDir.Extend(request.Path().Absolute())
return false
}
@ -95,7 +97,7 @@ func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
// Error out if file too big
if stat.Size() > subgophermapSizeMax {
returnErr = core.NewError(SubgophermapSizeErr)
returnErr = errSubgophermapSize.Extendf("%s %.2fMB", request.Path().Absolute(), float64(stat.Size())/1048576.0)
return false
}
@ -143,7 +145,7 @@ type TextSection struct {
}
// RenderAndWrite simply writes the byte slice to the client
func (s *TextSection) RenderAndWrite(client *core.Client) core.Error {
func (s *TextSection) RenderAndWrite(client *core.Client) error {
return client.Conn().Write(s.contents)
}
@ -154,8 +156,8 @@ type DirectorySection struct {
}
// RenderAndWrite scans and renders a list of the contents of a directory (skipping hidden or restricted files)
func (s *DirectorySection) RenderAndWrite(client *core.Client) core.Error {
fd, err := core.FileSystem.OpenFile(s.path)
func (s *DirectorySection) RenderAndWrite(client *core.Client) error {
file, err := core.OpenFile(s.path)
if err != nil {
return err
}
@ -164,7 +166,7 @@ func (s *DirectorySection) RenderAndWrite(client *core.Client) core.Error {
dirContents := make([]byte, 0)
// Scan directory and build lines
err = core.FileSystem.ScanDirectory(fd, s.path, func(file os.FileInfo, p *core.Path) {
err = core.ScanDirectory(file, s.path, func(file os.FileInfo, p *core.Path) {
// Ignore hidden files!
_, ok := s.hidden[file.Name()]
if ok {
@ -188,9 +190,9 @@ type FileSection struct {
}
// RenderAndWrite simply opens, reads and writes the file contents to the client
func (s *FileSection) RenderAndWrite(client *core.Client) core.Error {
func (s *FileSection) RenderAndWrite(client *core.Client) error {
// Open FD for the file
fd, err := core.FileSystem.OpenFile(s.path)
file, err := core.OpenFile(s.path)
if err != nil {
return err
}
@ -199,8 +201,8 @@ func (s *FileSection) RenderAndWrite(client *core.Client) core.Error {
b := make([]byte, 0)
// Scan the file contents, format for gophermap, append to byte slice
err = core.FileSystem.ScanFile(
fd,
err = core.ScanFile(
file,
func(line string) bool {
b = append(b, buildInfoLine(line)...)
return true
@ -220,15 +222,15 @@ type SubgophermapSection struct {
}
// RenderAndWrite reads, renders and writes the contents of the gophermap to the client
func (s *SubgophermapSection) RenderAndWrite(client *core.Client) core.Error {
func (s *SubgophermapSection) RenderAndWrite(client *core.Client) error {
// Get FD for gophermap
fd, err := core.FileSystem.OpenFile(s.path)
file, err := core.OpenFile(s.path)
if err != nil {
return err
}
// Read gophermap into sections
sections, err := readGophermap(fd, s.path)
sections, err := readGophermap(file, s.path)
if err != nil {
return err
}
@ -250,6 +252,6 @@ type CGISection struct {
}
// RenderAndWrite takes the request, and executes the associated CGI script with parameters
func (s *CGISection) RenderAndWrite(client *core.Client) core.Error {
return core.ExecuteCGIScript(client, s.request)
func (s *CGISection) RenderAndWrite(client *core.Client) error {
return core.TryExecuteCGIScript(client, s.request)
}

@ -14,7 +14,7 @@ func generateHTMLRedirect(url string) []byte {
"<p>\n" +
"The URL linked is <A HREF=\"" + url + "\">" + url + "</A>\n" +
"<p>\n" +
"Thanks for using Gophor!\n" +
"Thanks for using Gophi!\n" +
"</body>\n" +
"</html>\n"

@ -1,6 +1,9 @@
package gopher
import "strings"
import (
"path"
"strings"
)
// ItemType specifies a gopher item type char
type ItemType byte
@ -49,7 +52,7 @@ const (
// Internal item types
const (
typeDefault = typeBin
typeInfoNotStated = ItemType('I')
typeInfoNotStated = ItemType('\x00')
typeUnknown = ItemType('?')
)
@ -110,6 +113,7 @@ var fileExtMap = map[string]ItemType{
".jpeg": typeImage,
".png": typeImage,
".gif": typeImage,
".webp": typeImage,
".html": typeHTML,
".htm": typeHTML,
@ -134,30 +138,25 @@ var fileExtMap = map[string]ItemType{
// getItemType is an internal function to get an ItemType for a file name string
func getItemType(name string) ItemType {
// Split, name MUST be lower
split := strings.Split(strings.ToLower(name), ".")
// First we look at how many '.' in name string
splitLen := len(split)
switch splitLen {
case 0:
// We cannot tell the file type, return default
// Get file extension (lower!)
ext := strings.ToLower(path.Ext(name))
// Empty, cannot tell so return default
if ext == "" {
return typeDefault
}
default:
// get index of str after last '.', look up in fileExtMap
fileType, ok := fileExtMap["."+split[splitLen-1]]
if ok {
return fileType
}
// Lookup in map, return value or default
itemType, ok := fileExtMap[ext]
if !ok {
return typeDefault
}
return itemType
}
// parseLineType parses a gophermap's line type based on first char and contents
func parseLineType(line string) ItemType {
lineLen := len(line)
if lineLen == 0 {
return typeInfoNotStated
}
@ -167,7 +166,6 @@ func parseLineType(line string) ItemType {
if lineLen == 1 {
// The only accepted types for length 1 line below:
t := ItemType(line[0])
if t == typeEnd ||
t == typeEndBeginList ||
t == typeComment ||
@ -192,5 +190,6 @@ func parseLineType(line string) ItemType {
return typeInfoNotStated
}
// Return as-is
return t
}
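A standalone version of the new extension lookup, with a tiny illustrative map (gophi's real fileExtMap is much larger, and the item type characters here are assumptions about the package's constants):

package main

import (
	"fmt"
	"path"
	"strings"
)

var fileExtMap = map[string]byte{
	".txt":  '0', // text
	".png":  'I', // image
	".html": 'h', // HTML
}

func getItemType(name string) byte {
	// Lower-case the extension, as the real code does
	ext := strings.ToLower(path.Ext(name))
	if ext == "" {
		return '9' // no extension: fall back to the binary default
	}
	t, ok := fileExtMap[ext]
	if !ok {
		return '9'
	}
	return t
}

func main() {
	fmt.Printf("%c %c %c\n", getItemType("Photo.PNG"), getItemType("README"), getItemType("notes.txt"))
	// Output: I 9 0
}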

@ -1,42 +1,58 @@
package gopher
import (
"flag"
"gophi/core"
"net"
"github.com/grufwub/go-config"
"github.com/grufwub/go-filecache"
)
// setup parses gopher specific flags, and all core flags, preparing server for .Run()
func setup() {
pWidth := flag.Uint(pageWidthFlagStr, 80, pageWidthDescStr)
footerText := flag.String(footerTextFlagStr, "Gophi, a Gopher server in Go!", footerTextDescStr)
subgopherSizeMax := flag.Float64(subgopherSizeMaxFlagStr, 1.0, subgopherSizeMaxDescStr)
admin := flag.String(adminFlagStr, "", adminDescStr)
desc := flag.String(descFlagStr, "", descDescStr)
geo := flag.String(geoFlagStr, "", geoDescStr)
core.ParseFlagsAndSetup("gopher", generateErrorMessage)
// Run does as it says :)
func Run() {
// Create new TOML config parser
tree := make(config.Tree)
// Parse gopher specific flags, then all
pWidth := tree.Uint64("gopher.page-width", 80)
footerText := tree.String("gopher.footer", "")
subgopherSizeMax := tree.Float64("gopher.subgopher-max", 1.0)
admin := tree.String("gopher.admin-email", "")
desc := tree.String("gopher.description", "")
geo := tree.String("gopher.geolocation", "")
core.ParseConfigAndSetup(
tree,
"gopher",
70,
func() (*core.Listener, error) {
l, err := net.Listen("tcp", core.Bind+":"+core.Port)
if err != nil {
return nil, err
}
return core.NewListener(l), nil
},
newFileContent,
handleDirectory,
handleLargeFile,
appendCgiEnv,
)
// Setup gopher specific global variables
subgophermapSizeMax = int64(1048576.0 * *subgopherSizeMax) // convert float to megabytes
pageWidth = int(*pWidth)
footer = buildFooter(*footerText)
gophermapRegex = compileGophermapRegex()
// Add generated files to cache if not present
p := core.NewPath(core.Root, "caps.txt")
if _, err := core.FileSystem.StatFile(p); err != nil {
core.SystemLog.Info("Policy file %s not found! Generating...", p.Absolute())
core.FileSystem.AddGeneratedFile(p, generateCapsTxt(*desc, *admin, *geo))
}
p = core.NewPath(core.Root, "robots.txt")
if _, err := core.FileSystem.StatFile(p); err != nil {
core.SystemLog.Info("Policy file %s not found! Generating...", p.Absolute())
core.FileSystem.AddGeneratedFile(p, generateRobotsTxt())
}
}
// Run does as says :)
func Run() {
setup()
// Add generated policy file to cache
p := core.NewSanitizedPathAtRoot(core.Root, "caps.txt")
core.SystemLog.Infof("Generating policy file %s...", p.Absolute())
core.FileCache.Put(filecache.NewFile(p.Absolute(), false, &generatedFileContent{generateCapsTxt(*desc, *admin, *geo)}))
// Remove things we don't need hanging around
desc = nil
admin = nil
geo = nil
tree = nil
// Start!
core.Start(serve)
}

@ -11,7 +11,8 @@ func generatePolicyHeader(name string) string {
text := "# This is an automatically generated" + "\r\n"
text += "# server policy file: " + name + "\r\n"
text += "#" + "\r\n"
text += "# BlackLivesMatter" + "\r\n"
text += "# ACAB\r\n"
text += "# BlackLivesMatter\r\n"
return text
}
@ -27,9 +28,6 @@ func generateCapsTxt(desc, admin, geo string) []byte {
text += "PathDelimeter=/" + "\r\n"
text += "PathIdentity=." + "\r\n"
text += "PathParent=.." + "\r\n"
text += "PathParentDouble=FALSE" + "\r\n"
text += "PathEscapeCharacter=\\" + "\r\n"
text += "PathKeepPreDelimeter=FALSE" + "\r\n"
text += "\r\n"
text += "ServerSoftware=Gophi" + "\r\n"
text += "ServerSoftwareVersion=" + core.Version + "\r\n"

@ -5,15 +5,8 @@ import (
"regexp"
)
var (
// gophermapRegex is the precompiled gophermap file name regex check
gophermapRegex *regexp.Regexp
)
// compileGophermapRegex compiles the gophermap file name check regex
func compileGophermapRegex() *regexp.Regexp {
return regexp.MustCompile(`^(|.+/|.+\.)gophermap$`)
}
// gophermapRegex is the precompiled gophermap file name regex check
var gophermapRegex = regexp.MustCompile(`^(|.+/|.+\.)gophermap$`)
// isGophermap checks a file path against the gophermap regex to determine whether it is a gophermap
func isGophermap(path *core.Path) bool {

@ -6,119 +6,128 @@ import (
"strings"
)
var (
// Gophermap formatting globals
subgophermapSizeMax int64
pageWidth int
footer []byte
)
// serve is the global gopher server's serve function
func serve(client *core.Client) {
// Receive line from client
received, err := client.Conn().ReadLine()
if err != nil {
client.LogError(clientReadFailStr)
client.LogError("Conn read fail")
handleError(client, err)
return
}
// Convert to string + remove leading '/'
line := strings.TrimPrefix(string(received), "/")
// Split up to first tab in case we've been
// given index search query (which we use to set CGI env),
// or extra Gopher+ information (which we don't care about)
raw, extra := core.SplitBy(string(received), "\t")
// If prefixed by 'URL:' send a redirect
lenBefore := len(line)
line = strings.TrimPrefix(line, "URL:")
if len(line) < lenBefore {
client.Conn().Write(generateHTMLRedirect(line))
client.LogInfo(clientRedirectFmtStr, line)
// Ensure we've received a valid URL string
if core.HasAsciiControlBytes(raw) {
client.LogError("Invalid request: %s", raw)
handleError(client, core.ErrInvalidRequest.Extendf("%s has ascii control bytes", raw))
return
}
// Parse new request
request, err := core.ParseURLEncodedRequest(line)
// Parse the encoded URI into path and query components
path, query, err := core.ParseEncodedURI(raw)
if err != nil {
client.LogError(clientRequestParseFailStr)
client.LogError("Invalid request: %s", raw)
handleError(client, err)
return
}
// If prefixed by 'URL:' send a redirect
if strings.HasPrefix(path, "/URL:") {
raw = raw[5:]
client.Conn().Write(generateHTMLRedirect(raw))
client.LogInfo("Redirect to: %s", raw)
return
}
// Create new request and add the extra query part
request := core.NewRequest(core.BuildPath(path), query)
request.AddToQuery(extra)
// Handle the request!
err = core.FileSystem.HandleClient(
// Current client
client,
// Current request
request,
// New file contents function
newFileContents,
// Handle directory function
func(fs *core.FileSystemObject, client *core.Client, fd *os.File, p *core.Path) core.Error {
// First check for gophermap, create gophermap Path object
gophermap := p.JoinPath("gophermap")
// If gophermap exists, we fetch this
fd2, err := fs.OpenFile(gophermap)
if err == nil {
stat, osErr := fd2.Stat()
if osErr == nil {
// Fetch gophermap and defer close
defer fd2.Close()
return fs.FetchFile(client, fd2, stat, gophermap, newFileContents)
}
// Else, just close fd2
fd2.Close()
}
// Slice to write
dirContents := make([]byte, 0)
// Add directory heading + empty line
dirContents = append(dirContents, buildLine(typeInfo, "[ "+core.Hostname+p.Selector()+" ]", "TITLE", nullHost, nullPort)...)
dirContents = append(dirContents, buildInfoLine("")...)
// Scan directory and build lines
err = fs.ScanDirectory(
// Directory fd
fd,
// Directory path
p,
// Iter function
func(file os.FileInfo, fp *core.Path) {
// Append new formatted file listing (if correct type)
dirContents = appendFileListing(dirContents, file, fp)
},
)
if err != nil {
return err
}
// Add footer, write contents
dirContents = append(dirContents, footer...)
return client.Conn().Write(dirContents)
},
)
err = core.HandleClient(client, request)
// Final error handling
if err != nil {
handleError(client, err)
client.LogError(clientServeFailStr, request.Path().Absolute())
client.LogError("Failed to serve: %s", request.String())
} else {
client.LogInfo(clientServedStr, request.Path().Absolute())
client.LogInfo("Served: %s", request.String())
}
}
func handleDirectory(client *core.Client, file *os.File, p *core.Path) error {
// First check for gophermap, create gophermap Path object
gophermap := p.JoinPathUnsafe("gophermap")
// If gophermap exists, we fetch this
file2, err := core.OpenFile(gophermap)
if err == nil {
stat, osErr := file2.Stat()
if osErr == nil {
// Fetch gophermap and defer close
defer file2.Close()
return core.FetchFile(client, file2, stat, gophermap)
}
// Else, just close file2
file2.Close()
}
// Slice to write
dirContents := make([]byte, 0)
// Add directory heading, empty line and a back line
dirContents = append(dirContents, buildLine(typeInfo, "[ "+core.Hostname+p.Selector()+" ]", "TITLE", nullHost, nullPort)...)
dirContents = append(dirContents, buildInfoLine("")...)
dirContents = append(dirContents, buildLine(typeDirectory, "..", p.Selector(), core.Hostname, core.Port)...)
// Scan directory and build lines
err = core.ScanDirectory(
file,
p,
func(file os.FileInfo, fp *core.Path) {
// Append new formatted file listing (if correct type)
dirContents = appendFileListing(dirContents, file, fp)
},
)
if err != nil {
return err
}
// Add footer, write contents
dirContents = append(dirContents, footer...)
return client.Conn().Write(dirContents)
}
func handleLargeFile(client *core.Client, file *os.File, p *core.Path) error {
return client.Conn().ReadFrom(file)
}
// handleError determines whether to send an error response to the client, and logs to system
func handleError(client *core.Client, err core.Error) {
response, ok := generateErrorResponse(err.Code())
func handleError(client *core.Client, err error) {
response, ok := generateErrorResponse(err)
if ok {
client.Conn().Write(response)
}
core.SystemLog.Error(err.Error())
core.SystemLog.Errorf(err.Error())
}
// newFileContent returns a new FileContent object
func newFileContents(p *core.Path) core.FileContents {
func newFileContent(p *core.Path) core.FileContent {
if isGophermap(p) {
return &gophermapContents{}
return &gophermapContent{}
}
return &core.RegularFileContents{}
return &core.RegularFileContent{}
}

@ -1,49 +0,0 @@
package gopher
// Client error response strings
const (
errorResponse400 = "400 Bad Request"
errorResponse401 = "401 Unauthorised"
errorResponse403 = "403 Forbidden"
errorResponse404 = "404 Not Found"
errorResponse408 = "408 Request Time-out"
errorResponse410 = "410 Gone"
errorResponse500 = "500 Internal Server Error"
errorResponse501 = "501 Not Implemented"
errorResponse503 = "503 Service Unavailable"
)
// Gopher flag string constants
const (
pageWidthFlagStr = "page-width"
pageWidthDescStr = "Gopher page width"
footerTextFlagStr = "footer-text"
footerTextDescStr = "Footer text (empty to disable)"
subgopherSizeMaxFlagStr = "subgopher-size-max"
subgopherSizeMaxDescStr = "Subgophermap size max (megabytes)"
adminFlagStr = "admin"
adminDescStr = "Generated policy file admin email"
descFlagStr = "description"
descDescStr = "Generated policy file server description"
geoFlagStr = "geolocation"
geoDescStr = "Generated policy file server geolocation"
)
// Log string constants
const (
clientReadFailStr = "Failed to read"
clientRedirectFmtStr = "Redirecting to: %s"
clientRequestParseFailStr = "Failed to parse request"
clientServeFailStr = "Failed to serve: %s"
clientServedStr = "Served: %s"
invalidGophermapErrStr = "Invalid gophermap"
subgophermapIsDirErrStr = "Subgophermap path is dir"
subgophermapSizeErrStr = "Subgophermap size too large"
unknownErrStr = "Unknown error code"
)