parent
ebe414d8f3
commit
75cfbc0c65
@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
CC='x86_64-linux-musl-gcc' CGO_ENABLED=1 go build -trimpath -buildmode 'pie' -a -tags 'netgo' -ldflags '-s -w -extldflags "-static"' -o gophor.gopher main_gopher.go
|
@ -1,46 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* ServerConfig:
|
|
||||||
* Holds onto global server configuration details
|
|
||||||
* and any data objects we want to keep in memory
|
|
||||||
* (e.g. loggers, restricted files regular expressions
|
|
||||||
* and file cache)
|
|
||||||
*/
|
|
||||||
type ServerConfig struct {
|
|
||||||
/* Executable Settings */
|
|
||||||
Env []string
|
|
||||||
CgiEnv []string
|
|
||||||
CgiEnabled bool
|
|
||||||
MaxExecRunTime time.Duration
|
|
||||||
|
|
||||||
/* Content settings */
|
|
||||||
FooterText []byte
|
|
||||||
PageWidth int
|
|
||||||
|
|
||||||
/* Logging */
|
|
||||||
SysLog LoggerInterface
|
|
||||||
AccLog LoggerInterface
|
|
||||||
|
|
||||||
/* Filesystem access */
|
|
||||||
FileSystem *FileSystem
|
|
||||||
|
|
||||||
/* Buffer sizes */
|
|
||||||
SocketWriteBufSize int
|
|
||||||
SocketReadBufSize int
|
|
||||||
SocketReadMax int
|
|
||||||
SkipPrefixBufSize int
|
|
||||||
FileReadBufSize int
|
|
||||||
|
|
||||||
/* Socket deadlines */
|
|
||||||
SocketReadDeadline time.Duration
|
|
||||||
SocketWriteDeadline time.Duration
|
|
||||||
|
|
||||||
/* Precompiled regular expressions */
|
|
||||||
RgxGophermap *regexp.Regexp
|
|
||||||
RgxCgiBin *regexp.Regexp
|
|
||||||
}
|
|
@ -1,175 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
"bufio"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ConnHost struct {
|
|
||||||
/* Hold host specific details */
|
|
||||||
name string
|
|
||||||
hostport string
|
|
||||||
fwdport string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (host *ConnHost) Name() string {
|
|
||||||
return host.name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (host *ConnHost) Port() string {
|
|
||||||
return host.fwdport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (host *ConnHost) RealPort() string {
|
|
||||||
return host.hostport
|
|
||||||
}
|
|
||||||
|
|
||||||
type ConnClient struct {
|
|
||||||
/* Hold client specific details */
|
|
||||||
ip string
|
|
||||||
port string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (client *ConnClient) Ip() string {
|
|
||||||
return client.ip
|
|
||||||
}
|
|
||||||
|
|
||||||
func (client *ConnClient) Port() string {
|
|
||||||
return client.port
|
|
||||||
}
|
|
||||||
|
|
||||||
func (client *ConnClient) AddrStr() string {
|
|
||||||
return client.Ip()+":"+client.Port()
|
|
||||||
}
|
|
||||||
|
|
||||||
type GophorListener struct {
|
|
||||||
/* Simple net.Listener wrapper that holds onto virtual
|
|
||||||
* host information + generates Worker instances on Accept()
|
|
||||||
*/
|
|
||||||
|
|
||||||
Listener net.Listener
|
|
||||||
Host *ConnHost
|
|
||||||
Root string
|
|
||||||
}
|
|
||||||
|
|
||||||
func BeginGophorListen(bindAddr, hostname, port, fwdPort, rootDir string) (*GophorListener, error) {
|
|
||||||
gophorListener := new(GophorListener)
|
|
||||||
gophorListener.Host = &ConnHost{ hostname, port, fwdPort }
|
|
||||||
gophorListener.Root = rootDir
|
|
||||||
|
|
||||||
var err error
|
|
||||||
gophorListener.Listener, err = net.Listen("tcp", bindAddr+":"+port)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
} else {
|
|
||||||
return gophorListener, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *GophorListener) Accept() (*Worker, error) {
|
|
||||||
conn, err := l.Listener.Accept()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Should always be ok as listener is type TCP (see above) */
|
|
||||||
addr, _ := conn.RemoteAddr().(*net.TCPAddr)
|
|
||||||
client := &ConnClient{ addr.IP.String(), strconv.Itoa(addr.Port) }
|
|
||||||
|
|
||||||
return &Worker{ NewBufferedDeadlineConn(conn), l.Host, client, l.Root }, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type DeadlineConn struct {
|
|
||||||
/* Simple wrapper to net.Conn that sets deadlines
|
|
||||||
* on each call to Read() / Write()
|
|
||||||
*/
|
|
||||||
|
|
||||||
conn net.Conn
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDeadlineConn(conn net.Conn) *DeadlineConn {
|
|
||||||
return &DeadlineConn{ conn }
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *DeadlineConn) Read(b []byte) (int, error) {
|
|
||||||
/* Implements a regular net.Conn + updates deadline */
|
|
||||||
c.conn.SetReadDeadline(time.Now().Add(Config.SocketReadDeadline))
|
|
||||||
return c.conn.Read(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *DeadlineConn) Write(b []byte) (int, error) {
|
|
||||||
/* Implements a regular net.Conn + updates deadline */
|
|
||||||
c.conn.SetWriteDeadline(time.Now().Add(Config.SocketWriteDeadline))
|
|
||||||
return c.conn.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *DeadlineConn) Close() error {
|
|
||||||
/* Close */
|
|
||||||
return c.conn.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
type BufferedDeadlineConn struct {
|
|
||||||
/* Wrapper around DeadlineConn that provides buffered
|
|
||||||
* reads and writes.
|
|
||||||
*/
|
|
||||||
|
|
||||||
conn *DeadlineConn
|
|
||||||
buffer *bufio.ReadWriter
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBufferedDeadlineConn(conn net.Conn) *BufferedDeadlineConn {
|
|
||||||
deadlineConn := NewDeadlineConn(conn)
|
|
||||||
return &BufferedDeadlineConn{
|
|
||||||
deadlineConn,
|
|
||||||
bufio.NewReadWriter(
|
|
||||||
bufio.NewReaderSize(deadlineConn, Config.SocketReadBufSize),
|
|
||||||
bufio.NewWriterSize(deadlineConn, Config.SocketWriteBufSize),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BufferedDeadlineConn) ReadLine() ([]byte, error) {
|
|
||||||
/* Return slice */
|
|
||||||
b := make([]byte, 0)
|
|
||||||
|
|
||||||
for len(b) < Config.SocketReadMax {
|
|
||||||
/* Read line */
|
|
||||||
line, isPrefix, err := c.buffer.ReadLine()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Add to return slice */
|
|
||||||
b = append(b, line...)
|
|
||||||
|
|
||||||
/* If !isPrefix, we can break-out */
|
|
||||||
if !isPrefix {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BufferedDeadlineConn) Write(b []byte) (int, error) {
|
|
||||||
return c.buffer.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BufferedDeadlineConn) WriteData(b []byte) error {
|
|
||||||
_, err := c.buffer.Write(b)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BufferedDeadlineConn) WriteRaw(r io.Reader) error {
|
|
||||||
_, err := c.buffer.ReadFrom(r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *BufferedDeadlineConn) Close() error {
|
|
||||||
/* First flush buffer, then close */
|
|
||||||
c.buffer.Flush()
|
|
||||||
return c.conn.Close()
|
|
||||||
}
|
|
@ -0,0 +1,77 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import "container/list"
|
||||||
|
|
||||||
|
// element wraps a map key and value
|
||||||
|
type element struct {
|
||||||
|
key string
|
||||||
|
value *file
|
||||||
|
}
|
||||||
|
|
||||||
|
// lruCacheMap is a fixed-size LRU hash map
|
||||||
|
type lruCacheMap struct {
|
||||||
|
hashMap map[string]*list.Element
|
||||||
|
list *list.List
|
||||||
|
size int
|
||||||
|
}
|
||||||
|
|
||||||
|
// newLRUCacheMap returns a new LRUCacheMap of specified size
|
||||||
|
func newLRUCacheMap(size int) *lruCacheMap {
|
||||||
|
return &lruCacheMap{
|
||||||
|
// size+1 to account for moment during put after adding new value but before old value is purged
|
||||||
|
make(map[string]*list.Element, size+1),
|
||||||
|
&list.List{},
|
||||||
|
size,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns file from LRUCacheMap for key
|
||||||
|
func (lru *lruCacheMap) Get(key string) (*file, bool) {
|
||||||
|
lElem, ok := lru.hashMap[key]
|
||||||
|
if !ok {
|
||||||
|
return nil, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move element to front of the list
|
||||||
|
lru.list.MoveToFront(lElem)
|
||||||
|
|
||||||
|
// Get Element and return *File value from it
|
||||||
|
element, _ := lElem.Value.(*element)
|
||||||
|
return element.value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put file in LRUCacheMap at key
|
||||||
|
func (lru *lruCacheMap) Put(key string, value *file) {
|
||||||
|
lElem := lru.list.PushFront(&element{key, value})
|
||||||
|
lru.hashMap[key] = lElem
|
||||||
|
|
||||||
|
if lru.list.Len() > lru.size {
|
||||||
|
// Get element at back of list and Element from it
|
||||||
|
lElem = lru.list.Back()
|
||||||
|
element, _ := lElem.Value.(*element)
|
||||||
|
|
||||||
|
// Delete entry in hashMap with key from Element, and from list
|
||||||
|
delete(lru.hashMap, element.key)
|
||||||
|
lru.list.Remove(lElem)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove file in LRUCacheMap with key
|
||||||
|
func (lru *lruCacheMap) Remove(key string) {
|
||||||
|
lElem, ok := lru.hashMap[key]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete entry in hashMap and list
|
||||||
|
delete(lru.hashMap, key)
|
||||||
|
lru.list.Remove(lElem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterate performs an iteration over all key:value pairs in LRUCacheMap with supplied function
|
||||||
|
func (lru *lruCacheMap) Iterate(iterator func(key string, value *file)) {
|
||||||
|
for key := range lru.hashMap {
|
||||||
|
element, _ := lru.hashMap[key].Value.(*element)
|
||||||
|
iterator(element.key, element.value)
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,331 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// cgiEnv holds the global slice of constant CGI environment variables
|
||||||
|
cgiEnv []string
|
||||||
|
|
||||||
|
// maxCGIRunTime specifies the maximum time a CGI script can run for
|
||||||
|
maxCGIRunTime time.Duration
|
||||||
|
|
||||||
|
// httpPrefixBufSize specifies size of the buffer to use when skipping HTTP headers
|
||||||
|
httpPrefixBufSize int
|
||||||
|
|
||||||
|
// ExecuteCGIScript is a pointer to the currently set CGI execution function
|
||||||
|
ExecuteCGIScript func(*Client, *Request) Error
|
||||||
|
)
|
||||||
|
|
||||||
|
// setupInitialCGIEnv takes a safe PATH, uses other server variables and returns a slice of constant CGI environment variables
|
||||||
|
func setupInitialCGIEnv(safePath string) []string {
|
||||||
|
env := make([]string, 0)
|
||||||
|
|
||||||
|
SystemLog.Info("CGI safe path: %s", safePath)
|
||||||
|
env = append(env, "PATH="+safePath)
|
||||||
|
env = append(env, "SERVER_NAME="+Hostname)
|
||||||
|
env = append(env, "SERVER_PORT="+FwdPort)
|
||||||
|
env = append(env, "DOCUMENT_ROOT="+Root)
|
||||||
|
|
||||||
|
return env
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateCGIEnv takes a Client, and Request object, the global constant slice and generates a full set of CGI environment variables
|
||||||
|
func generateCGIEnv(client *Client, request *Request) []string {
|
||||||
|
env := append(cgiEnv, "REMOTE_ADDR="+client.IP())
|
||||||
|
env = append(env, "QUERY_STRING="+request.Params())
|
||||||
|
env = append(env, "SCRIPT_NAME="+request.Path().Relative())
|
||||||
|
env = append(env, "SCRIPT_FILENAME="+request.Path().Absolute())
|
||||||
|
env = append(env, "SELECTOR="+request.Path().Selector())
|
||||||
|
env = append(env, "REQUEST_URI="+request.Path().Selector())
|
||||||
|
|
||||||
|
return env
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeCGIScriptNoHTTP executes a CGI script, responding with output to client without stripping HTTP headers
|
||||||
|
func executeCGIScriptNoHTTP(client *Client, request *Request) Error {
|
||||||
|
return execute(client.Conn().Writer(), request.Path(), generateCGIEnv(client, request))
|
||||||
|
}
|
||||||
|
|
||||||
|
// executeCGIScriptStripHTTP executes a CGI script, responding with output to client, stripping HTTP headers and handling status code
|
||||||
|
func executeCGIScriptStripHTTP(client *Client, request *Request) Error {
|
||||||
|
// Create new httpStripWriter
|
||||||
|
httpWriter := newhttpStripWriter(client.Conn().Writer())
|
||||||
|
|
||||||
|
// Begin executing script
|
||||||
|
err := execute(httpWriter, request.Path(), generateCGIEnv(client, request))
|
||||||
|
|
||||||
|
// Parse HTTP headers (if present). Return error or continue letting output of script -> client
|
||||||
|
cgiStatusErr := httpWriter.FinishUp()
|
||||||
|
if cgiStatusErr != nil {
|
||||||
|
return cgiStatusErr
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// execute executes something at Path, with supplied environment and ouputing to writer
|
||||||
|
func execute(writer io.Writer, p *Path, env []string) Error {
|
||||||
|
// Create cmd object
|
||||||
|
cmd := exec.Command(p.Absolute())
|
||||||
|
|
||||||
|
// Set new process group id
|
||||||
|
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||||
|
|
||||||
|
// Setup cmd environment
|
||||||
|
cmd.Env, cmd.Dir = env, p.Root()
|
||||||
|
|
||||||
|
// Setup cmd out writer
|
||||||
|
cmd.Stdout = writer
|
||||||
|
|
||||||
|
// Start executing
|
||||||
|
err := cmd.Start()
|
||||||
|
if err != nil {
|
||||||
|
return WrapError(CGIStartErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup goroutine to kill cmd after maxCGIRunTime
|
||||||
|
go func() {
|
||||||
|
// At least let the script try to finish...
|
||||||
|
time.Sleep(maxCGIRunTime)
|
||||||
|
|
||||||
|
// We've already finished
|
||||||
|
if cmd.ProcessState != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get process group id
|
||||||
|
pgid, err := syscall.Getpgid(cmd.Process.Pid)
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Fatal(pgidNotFoundErrStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Kill process group!
|
||||||
|
err = syscall.Kill(-pgid, syscall.SIGTERM)
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Fatal(pgidStopErrStr, pgid, err.Error())
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Wait for command to finish, get exit code
|
||||||
|
err = cmd.Wait()
|
||||||
|
exitCode := 0
|
||||||
|
if err != nil {
|
||||||
|
// Error, try to get exit code
|
||||||
|
exitError, ok := err.(*exec.ExitError)
|
||||||
|
if ok {
|
||||||
|
waitStatus := exitError.Sys().(syscall.WaitStatus)
|
||||||
|
exitCode = waitStatus.ExitStatus()
|
||||||
|
} else {
|
||||||
|
exitCode = 1
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No error! Get exit code directly from command process state
|
||||||
|
waitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus)
|
||||||
|
exitCode = waitStatus.ExitStatus()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Non-zero exit code? Return error
|
||||||
|
if exitCode != 0 {
|
||||||
|
SystemLog.Error(cgiExecuteErrStr, p.Absolute(), exitCode)
|
||||||
|
return NewError(CGIExitCodeErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exit fine!
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpStripWriter wraps a writer, reading HTTP headers and parsing status code, before deciding to continue writing
|
||||||
|
type httpStripWriter struct {
|
||||||
|
writer io.Writer
|
||||||
|
skipBuffer []byte
|
||||||
|
skipIndex int
|
||||||
|
err Error
|
||||||
|
|
||||||
|
// writeFunc is a pointer to the current underlying write function
|
||||||
|
writeFunc func(*httpStripWriter, []byte) (int, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newhttpStripWriter returns a new httpStripWriter wrapping supplied writer
|
||||||
|
func newhttpStripWriter(w io.Writer) *httpStripWriter {
|
||||||
|
return &httpStripWriter{
|
||||||
|
w,
|
||||||
|
make([]byte, httpPrefixBufSize),
|
||||||
|
0,
|
||||||
|
nil,
|
||||||
|
writeCheckForHeaders,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addToSkipBuffer adds supplied bytes to the skip buffer, returning number added
|
||||||
|
func (w *httpStripWriter) addToSkipBuffer(data []byte) int {
|
||||||
|
// Figure out amount to add
|
||||||
|
toAdd := len(w.skipBuffer) - w.skipIndex
|
||||||
|
if len(data) < toAdd {
|
||||||
|
toAdd = len(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add data to skip buffer, return added
|
||||||
|
copy(w.skipBuffer[w.skipIndex:], data[:toAdd])
|
||||||
|
w.skipIndex += toAdd
|
||||||
|
return toAdd
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseHTTPHeaderSection checks if we've received a valid HTTP header section, and determine if we should continue writing
|
||||||
|
func (w *httpStripWriter) parseHTTPHeaderSection() (bool, bool) {
|
||||||
|
validHeaderSection, shouldContinue := false, true
|
||||||
|
for _, header := range strings.Split(string(w.skipBuffer), "\r\n") {
|
||||||
|
header = strings.ToLower(header)
|
||||||
|
|
||||||
|
// Try look for status header
|
||||||
|
lenBefore := len(header)
|
||||||
|
header = strings.TrimPrefix(header, "status:")
|
||||||
|
if len(header) < lenBefore {
|
||||||
|
// Ensure no spaces + just number
|
||||||
|
header = strings.Split(header, " ")[0]
|
||||||
|
|
||||||
|
// Ignore 200
|
||||||
|
if header == "200" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any other value indicates error, should not continue
|
||||||
|
shouldContinue = false
|
||||||
|
|
||||||
|
// Parse error code
|
||||||
|
code := CGIStatusUnknownErr
|
||||||
|
switch header {
|
||||||
|
case "400":
|
||||||
|
code = CGIStatus400Err
|
||||||
|
case "401":
|
||||||
|
code = CGIStatus401Err
|
||||||
|
case "403":
|
||||||
|
code = CGIStatus403Err
|
||||||
|
case "404":
|
||||||
|
code = CGIStatus404Err
|
||||||
|
case "408":
|
||||||
|
code = CGIStatus408Err
|
||||||
|
case "410":
|
||||||
|
code = CGIStatus410Err
|
||||||
|
case "500":
|
||||||
|
code = CGIStatus500Err
|
||||||
|
case "501":
|
||||||
|
code = CGIStatus501Err
|
||||||
|
case "503":
|
||||||
|
code = CGIStatus503Err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set error code
|
||||||
|
w.err = NewError(code)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Found a content-type header, this is a valid header section
|
||||||
|
if strings.Contains(header, "content-type:") {
|
||||||
|
validHeaderSection = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return validHeaderSection, shouldContinue
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeSkipBuffer writes contents of skipBuffer to the underlying writer if necessary
|
||||||
|
func (w *httpStripWriter) writeSkipBuffer() (bool, error) {
|
||||||
|
// Defer resetting skipIndex
|
||||||
|
defer func() {
|
||||||
|
w.skipIndex = 0
|
||||||
|
}()
|
||||||
|
|
||||||
|
// First try parse the headers, determine next steps
|
||||||
|
validHeaders, shouldContinue := w.parseHTTPHeaderSection()
|
||||||
|
|
||||||
|
// Valid headers received, don't bother writing. Return the shouldContinue value
|
||||||
|
if validHeaders {
|
||||||
|
return shouldContinue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default is to write skip buffer contents, shouldContinue only means something with valid headers
|
||||||
|
_, err := w.writer.Write(w.skipBuffer[:w.skipIndex])
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *httpStripWriter) FinishUp() Error {
|
||||||
|
// If skipIndex not zero, try write (or at least parse and see if we need
|
||||||
|
// to write) remaining skipBuffer. (e.g. if CGI output very short)
|
||||||
|
if w.skipIndex > 0 {
|
||||||
|
w.writeSkipBuffer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return error if set
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *httpStripWriter) Write(b []byte) (int, error) {
|
||||||
|
// Write using currently set write function
|
||||||
|
return w.writeFunc(w, b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeRegular performs task of regular write function, it is a direct wrapper
|
||||||
|
func writeRegular(w *httpStripWriter, b []byte) (int, error) {
|
||||||
|
return w.writer.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// writeCheckForHeaders reads input data, checking for headers to add to skip buffer and parse before continuing
|
||||||
|
func writeCheckForHeaders(w *httpStripWriter, b []byte) (int, error) {
|
||||||
|
split := bytes.Split(b, []byte("\r\n\r\n"))
|
||||||
|
if len(split) == 1 {
|
||||||
|
// Headers found, try to add data to skip buffer
|
||||||
|
added := w.addToSkipBuffer(b)
|
||||||
|
|
||||||
|
if added < len(b) {
|
||||||
|
defer func() {
|
||||||
|
// Having written skip buffer, defer resetting write function
|
||||||
|
w.writeFunc = writeRegular
|
||||||
|
}()
|
||||||
|
|
||||||
|
doContinue, err := w.writeSkipBuffer()
|
||||||
|
if !doContinue {
|
||||||
|
return len(b), io.EOF
|
||||||
|
} else if err != nil {
|
||||||
|
return added, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write remaining data not added to skip buffer
|
||||||
|
count, err := w.writer.Write(b[added:])
|
||||||
|
if err != nil {
|
||||||
|
return added + count, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
// No use for skip buffer after belo, set write to regular
|
||||||
|
w.writeFunc = writeRegular
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Try add what we can to skip buffer
|
||||||
|
added := w.addToSkipBuffer(append(split[0], []byte("\r\n\r\n")...))
|
||||||
|
|
||||||
|
// Write skip buffer data if necessary, check if we should continue
|
||||||
|
doContinue, err := w.writeSkipBuffer()
|
||||||
|
if !doContinue {
|
||||||
|
return len(b), io.EOF
|
||||||
|
} else if err != nil {
|
||||||
|
return added, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write remaining data not added to skip buffer, to writer
|
||||||
|
count, err := w.writer.Write(b[added:])
|
||||||
|
if err != nil {
|
||||||
|
return added + count, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(b), nil
|
||||||
|
}
|
@ -0,0 +1,45 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Client holds onto an open Conn to a client, along with connection information
|
||||||
|
type Client struct {
|
||||||
|
cn *conn
|
||||||
|
ip *net.IP
|
||||||
|
port string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a new client based on supplied net.TCPConn
|
||||||
|
func NewClient(conn *net.TCPConn) *Client {
|
||||||
|
addr, _ := conn.RemoteAddr().(*net.TCPAddr)
|
||||||
|
ip, port := &addr.IP, strconv.Itoa(addr.Port)
|
||||||
|
return &Client{wrapConn(conn), ip, port}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conn returns the underlying conn
|
||||||
|
func (c *Client) Conn() *conn {
|
||||||
|
return c.cn
|
||||||
|
}
|
||||||
|
|
||||||
|
// IP returns the client's IP string
|
||||||
|
func (c *Client) IP() string {
|
||||||
|
return c.ip.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Port returns the client's connected port
|
||||||
|
func (c *Client) Port() string {
|
||||||
|
return c.port
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogInfo logs to the global access logger with the client IP as a prefix
|
||||||
|
func (c *Client) LogInfo(fmt string, args ...interface{}) {
|
||||||
|
AccessLog.Info("("+c.ip.String()+") "+fmt, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogError logs to the global access logger with the client IP as a prefix
|
||||||
|
func (c *Client) LogError(fmt string, args ...interface{}) {
|
||||||
|
AccessLog.Error("("+c.ip.String()+") "+fmt, args...)
|
||||||
|
}
|
@ -0,0 +1,120 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// connReadDeadline specifies the connection read deadline
|
||||||
|
connReadDeadline time.Duration
|
||||||
|
|
||||||
|
// connWriteDeadline specifies the connection write deadline
|
||||||
|
connWriteDeadline time.Duration
|
||||||
|
|
||||||
|
// connReadBufSize specifies the connection read buffer size
|
||||||
|
connReadBufSize int
|
||||||
|
|
||||||
|
// connWriteBufSize specifies the connection write buffer size
|
||||||
|
connWriteBufSize int
|
||||||
|
|
||||||
|
// connReadMax specifies the connection read max (in bytes)
|
||||||
|
connReadMax int
|
||||||
|
)
|
||||||
|
|
||||||
|
// deadlineConn wraps net.Conn to set the read / write deadlines on each access
|
||||||
|
type deadlineConn struct {
|
||||||
|
conn net.Conn
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read wraps the underlying net.Conn read function, setting read deadline on each access
|
||||||
|
func (c *deadlineConn) Read(b []byte) (int, error) {
|
||||||
|
c.conn.SetReadDeadline(time.Now().Add(connReadDeadline))
|
||||||
|
return c.conn.Read(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read wraps the underlying net.Conn write function, setting write deadline on each access
|
||||||
|
func (c *deadlineConn) Write(b []byte) (int, error) {
|
||||||
|
c.conn.SetWriteDeadline(time.Now().Add(connWriteDeadline))
|
||||||
|
return c.conn.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close directly wraps underlying net.Conn close function
|
||||||
|
func (c *deadlineConn) Close() error {
|
||||||
|
return c.conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Conn wraps a DeadlineConn with a buffer
|
||||||
|
type conn struct {
|
||||||
|
buf *bufio.ReadWriter
|
||||||
|
closer io.Closer
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrapConn wraps a net.Conn in DeadlineConn, then within Conn and returns the result
|
||||||
|
func wrapConn(c net.Conn) *conn {
|
||||||
|
deadlineConn := &deadlineConn{c}
|
||||||
|
buf := bufio.NewReadWriter(
|
||||||
|
bufio.NewReaderSize(deadlineConn, connReadBufSize),
|
||||||
|
bufio.NewWriterSize(deadlineConn, connWriteBufSize),
|
||||||
|
)
|
||||||
|
return &conn{buf, deadlineConn}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadLine reads a single line and returns the result, or nil and error
|
||||||
|
func (c *conn) ReadLine() ([]byte, Error) {
|
||||||
|
// return slice
|
||||||
|
b := make([]byte, 0)
|
||||||
|
|
||||||
|
for len(b) < connReadMax {
|
||||||
|
// read the line
|
||||||
|
line, isPrefix, err := c.buf.ReadLine()
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(ConnReadErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// append line contents to return slice
|
||||||
|
b = append(b, line...)
|
||||||
|
|
||||||
|
// if finished reading, break out
|
||||||
|
if !isPrefix {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteBytes writes a byte slice to the buffer and returns error status
|
||||||
|
func (c *conn) WriteBytes(b []byte) Error {
|
||||||
|
_, err := c.buf.Write(b)
|
||||||
|
if err != nil {
|
||||||
|
return WrapError(ConnWriteErr, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteFrom writes to the buffer from a reader and returns error status
|
||||||
|
func (c *conn) WriteFrom(r io.Reader) Error {
|
||||||
|
_, err := c.buf.ReadFrom(r)
|
||||||
|
if err != nil {
|
||||||
|
return WrapError(ConnWriteErr, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Writer returns the underlying buffer wrapped conn writer
|
||||||
|
func (c *conn) Writer() io.Writer {
|
||||||
|
return c.buf.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close flushes the underlying buffer then closes the conn
|
||||||
|
func (c *conn) Close() Error {
|
||||||
|
err := c.buf.Flush()
|
||||||
|
err = c.closer.Close()
|
||||||
|
if err != nil {
|
||||||
|
return WrapError(ConnCloseErr, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@ -0,0 +1,147 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
// ErrorCode specifies types of errors for later identification
|
||||||
|
type ErrorCode int
|
||||||
|
|
||||||
|
// Core ErrorCodes
|
||||||
|
const (
|
||||||
|
ConnWriteErr ErrorCode = -1
|
||||||
|
ConnReadErr ErrorCode = -2
|
||||||
|
ConnCloseErr ErrorCode = -3
|
||||||
|
ListenerResolveErr ErrorCode = -4
|
||||||
|
ListenerBeginErr ErrorCode = -5
|
||||||
|
ListenerAcceptErr ErrorCode = -6
|
||||||
|
InvalidIPErr ErrorCode = -7
|
||||||
|
InvalidPortErr ErrorCode = -8
|
||||||
|
FileOpenErr ErrorCode = -9
|
||||||
|
FileStatErr ErrorCode = -10
|
||||||
|
FileReadErr ErrorCode = -11
|
||||||
|
FileTypeErr ErrorCode = -12
|
||||||
|
DirectoryReadErr ErrorCode = -13
|
||||||
|
RestrictedPathErr ErrorCode = -14
|
||||||
|
InvalidRequestErr ErrorCode = -15
|
||||||
|
CGIStartErr ErrorCode = -16
|
||||||
|
CGIExitCodeErr ErrorCode = -17
|
||||||
|
CGIStatus400Err ErrorCode = -18
|
||||||
|
CGIStatus401Err ErrorCode = -19
|
||||||
|
CGIStatus403Err ErrorCode = -20
|
||||||
|
CGIStatus404Err ErrorCode = -21
|
||||||
|
CGIStatus408Err ErrorCode = -22
|
||||||
|
CGIStatus410Err ErrorCode = -23
|
||||||
|
CGIStatus500Err ErrorCode = -24
|
||||||
|
CGIStatus501Err ErrorCode = -25
|
||||||
|
CGIStatus503Err ErrorCode = -26
|
||||||
|
CGIStatusUnknownErr ErrorCode = -27
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error specifies error interface with identifiable ErrorCode
|
||||||
|
type Error interface {
|
||||||
|
Code() ErrorCode
|
||||||
|
Error() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// getExtendedErrorMessage converts an ErrorCode to string message
|
||||||
|
var getExtendedErrorMessage func(ErrorCode) string
|
||||||
|
|
||||||
|
// getErrorMessage converts an ErrorCode to string message first checking internal codes, next user supplied
|
||||||
|
func getErrorMessage(code ErrorCode) string {
|
||||||
|
switch code {
|
||||||
|
case ConnWriteErr:
|
||||||
|
return connWriteErrStr
|
||||||
|
case ConnReadErr:
|
||||||
|
return connReadErrStr
|
||||||
|
case ConnCloseErr:
|
||||||
|
return connCloseErrStr
|
||||||
|
case ListenerResolveErr:
|
||||||
|
return listenerResolveErrStr
|
||||||
|
case ListenerBeginErr:
|
||||||
|
return listenerBeginErrStr
|
||||||
|
case ListenerAcceptErr:
|
||||||
|
return listenerAcceptErrStr
|
||||||
|
case InvalidIPErr:
|
||||||
|
return invalidIPErrStr
|
||||||
|
case InvalidPortErr:
|
||||||
|
return invalidPortErrStr
|
||||||
|
case FileOpenErr:
|
||||||
|
return fileOpenErrStr
|
||||||
|
case FileStatErr:
|
||||||
|
return fileStatErrStr
|
||||||
|
case FileReadErr:
|
||||||
|
return fileReadErrStr
|
||||||
|
case FileTypeErr:
|
||||||
|
return fileTypeErrStr
|
||||||
|
case DirectoryReadErr:
|
||||||
|
return directoryReadErrStr
|
||||||
|
case RestrictedPathErr:
|
||||||
|
return restrictedPathErrStr
|
||||||
|
case InvalidRequestErr:
|
||||||
|
return invalidRequestErrStr
|
||||||
|
case CGIStartErr:
|
||||||
|
return cgiStartErrStr
|
||||||
|
case CGIExitCodeErr:
|
||||||
|
return cgiExitCodeErrStr
|
||||||
|
case CGIStatus400Err:
|
||||||
|
return cgiStatus400ErrStr
|
||||||
|
case CGIStatus401Err:
|
||||||
|
return cgiStatus401ErrStr
|
||||||
|
case CGIStatus403Err:
|
||||||
|
return cgiStatus403ErrStr
|
||||||
|
case CGIStatus404Err:
|
||||||
|
return cgiStatus404ErrStr
|
||||||
|
case CGIStatus408Err:
|
||||||
|
return cgiStatus408ErrStr
|
||||||
|
case CGIStatus410Err:
|
||||||
|
return cgiStatus410ErrStr
|
||||||
|
case CGIStatus500Err:
|
||||||
|
return cgiStatus500ErrStr
|
||||||
|
case CGIStatus501Err:
|
||||||
|
return cgiStatus501ErrStr
|
||||||
|
case CGIStatus503Err:
|
||||||
|
return cgiStatus503ErrStr
|
||||||
|
case CGIStatusUnknownErr:
|
||||||
|
return cgiStatusUnknownErrStr
|
||||||
|
default:
|
||||||
|
return getExtendedErrorMessage(code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// regularError simply holds an ErrorCode
|
||||||
|
type regularError struct {
|
||||||
|
code ErrorCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the error string for the underlying ErrorCode
|
||||||
|
func (e *regularError) Error() string {
|
||||||
|
return getErrorMessage(e.code)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the underlying ErrorCode
|
||||||
|
func (e *regularError) Code() ErrorCode {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewError returns a new Error based on supplied ErrorCode
|
||||||
|
func NewError(code ErrorCode) Error {
|
||||||
|
return ®ularError{code}
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrappedError wraps an existing error with new ErrorCode
|
||||||
|
type wrappedError struct {
|
||||||
|
code ErrorCode
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the error string for underlying error and set ErrorCode
|
||||||
|
func (e *wrappedError) Error() string {
|
||||||
|
return getErrorMessage(e.code) + " - " + e.err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the underlying ErrorCode
|
||||||
|
func (e *wrappedError) Code() ErrorCode {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// WrapError returns a new Error based on supplied error and ErrorCode
|
||||||
|
func WrapError(code ErrorCode, err error) Error {
|
||||||
|
return &wrappedError{code, err}
|
||||||
|
}
|
@ -0,0 +1,81 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isGeneratedType just checks if a file's contents implemented is GeneratedFileContents
|
||||||
|
func isGeneratedType(f *file) bool {
|
||||||
|
switch f.contents.(type) {
|
||||||
|
case *generatedFileContents:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// file provides a structure for managing a cached file including freshness, last refresh time etc
|
||||||
|
type file struct {
|
||||||
|
contents FileContents
|
||||||
|
lastRefresh int64
|
||||||
|
isFresh bool
|
||||||
|
sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// newFile returns a new File based on supplied FileContents
|
||||||
|
func newFile(contents FileContents) *file {
|
||||||
|
return &file{
|
||||||
|
contents,
|
||||||
|
0,
|
||||||
|
true,
|
||||||
|
sync.RWMutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFresh returns files freshness status
|
||||||
|
func (f *file) IsFresh() bool {
|
||||||
|
return f.isFresh
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetFresh sets the file as fresh
|
||||||
|
func (f *file) SetFresh() {
|
||||||
|
f.isFresh = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUnfresh sets the file as unfresh
|
||||||
|
func (f *file) SetUnfresh() {
|
||||||
|
f.isFresh = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// LastRefresh gets the time in nanoseconds of last refresh
|
||||||
|
func (f *file) LastRefresh() int64 {
|
||||||
|
return f.lastRefresh
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateRefreshTime updates the lastRefresh time to the current time in nanoseconds
|
||||||
|
func (f *file) UpdateRefreshTime() {
|
||||||
|
f.lastRefresh = time.Now().UnixNano()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CacheContents caches the file contents using the supplied file descriptor
|
||||||
|
func (f *file) CacheContents(fd *os.File, path *Path) Error {
|
||||||
|
f.contents.Clear()
|
||||||
|
|
||||||
|
// Load the file contents into cache
|
||||||
|
err := f.contents.Load(fd, path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the cache freshness
|
||||||
|
f.UpdateRefreshTime()
|
||||||
|
f.SetFresh()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToClient writes the cached file contents to the supplied client
|
||||||
|
func (f *file) WriteToClient(client *Client, path *Path) Error {
|
||||||
|
return f.contents.WriteToClient(client, path)
|
||||||
|
}
|
@ -0,0 +1,48 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
// FileContents provides an interface for caching, rendering and getting cached contents of a file.
type FileContents interface {
	// WriteToClient sends the (possibly cached) contents to the client
	WriteToClient(*Client, *Path) Error

	// Load reads contents from the open file descriptor into memory
	Load(*os.File, *Path) Error

	// Clear drops any cached contents
	Clear()
}
|
||||||
|
|
||||||
|
// generatedFileContents is a simple FileContents implementation for holding onto a generated (virtual) file contents
|
||||||
|
type generatedFileContents struct {
|
||||||
|
content []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToClient writes the generated file contents to the client
|
||||||
|
func (fc *generatedFileContents) WriteToClient(client *Client, path *Path) Error {
|
||||||
|
return client.Conn().WriteBytes(fc.content)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load does nothing
|
||||||
|
func (fc *generatedFileContents) Load(fd *os.File, path *Path) Error { return nil }
|
||||||
|
|
||||||
|
// Clear does nothing
|
||||||
|
func (fc *generatedFileContents) Clear() {}
|
||||||
|
|
||||||
|
// RegularFileContents is the simplest implementation of core.FileContents for regular files
|
||||||
|
type RegularFileContents struct {
|
||||||
|
contents []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToClient writes the current contents of FileContents to the client
|
||||||
|
func (fc *RegularFileContents) WriteToClient(client *Client, path *Path) Error {
|
||||||
|
return client.Conn().WriteBytes(fc.contents)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load takes an open FD and loads the file contents into FileContents memory
|
||||||
|
func (fc *RegularFileContents) Load(fd *os.File, path *Path) Error {
|
||||||
|
var err Error
|
||||||
|
fc.contents, err = FileSystem.ReadFile(fd)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear empties currently cached FileContents memory
|
||||||
|
func (fc *RegularFileContents) Clear() {
|
||||||
|
fc.contents = nil
|
||||||
|
}
|
@ -0,0 +1,343 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// fileReadBufSize is the buffer size used when reading files from disk
	fileReadBufSize int

	// monitorSleepTime is the duration the monitor goroutine sleeps between
	// file cache freshness checks
	monitorSleepTime time.Duration

	// fileSizeMax is the maximum file size (in bytes) allowed to be cached;
	// larger files are streamed directly to the client instead
	fileSizeMax int64

	// FileSystem is the global FileSystemObject
	FileSystem *FileSystemObject

	// userDir is the subdirectory name looked for under users' home folders
	userDir string
)
|
||||||
|
|
||||||
|
// FileSystemObject holds onto an LRUCacheMap and manages access to it, handless freshness checking and multi-threading
|
||||||
|
type FileSystemObject struct {
|
||||||
|
cache *lruCacheMap
|
||||||
|
sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileSystemObject returns a new FileSystemObject
|
||||||
|
func newFileSystemObject(size int) *FileSystemObject {
|
||||||
|
return &FileSystemObject{
|
||||||
|
newLRUCacheMap(size),
|
||||||
|
sync.RWMutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartMonitor starts the FileSystemObject freshness check monitor in its own goroutine
|
||||||
|
func (fs *FileSystemObject) StartMonitor() {
|
||||||
|
for {
|
||||||
|
// Sleep to not take up all the precious CPU time :)
|
||||||
|
time.Sleep(monitorSleepTime)
|
||||||
|
|
||||||
|
// Check file cache freshness
|
||||||
|
fs.checkCacheFreshness()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkCacheFreshness iterates through FileSystemObject's cache and check for freshness
|
||||||
|
func (fs *FileSystemObject) checkCacheFreshness() {
|
||||||
|
// Before anything get cache lock
|
||||||
|
fs.Lock()
|
||||||
|
|
||||||
|
fs.cache.Iterate(func(path string, f *file) {
|
||||||
|
// If this is a generated file we skip
|
||||||
|
if isGeneratedType(f) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check file still exists on disk
|
||||||
|
stat, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Error("Failed to stat file in cache: %s\n", path)
|
||||||
|
fs.cache.Remove(path)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get last mod time and check freshness
|
||||||
|
lastMod := stat.ModTime().UnixNano()
|
||||||
|
if f.IsFresh() && f.LastRefresh() < lastMod {
|
||||||
|
f.SetUnfresh()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Done! Unlock (:
|
||||||
|
fs.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenFile opens a file for reading (read-only, world-readable)
|
||||||
|
func (fs *FileSystemObject) OpenFile(p *Path) (*os.File, Error) {
|
||||||
|
fd, err := os.OpenFile(p.Absolute(), os.O_RDONLY, 0444)
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(FileOpenErr, err)
|
||||||
|
}
|
||||||
|
return fd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatFile performs a file stat on a file at path
|
||||||
|
func (fs *FileSystemObject) StatFile(p *Path) (os.FileInfo, Error) {
|
||||||
|
stat, err := os.Stat(p.Absolute())
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(FileStatErr, err)
|
||||||
|
}
|
||||||
|
return stat, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadFile reads a supplied file descriptor into a return byte slice, or error
|
||||||
|
func (fs *FileSystemObject) ReadFile(fd *os.File) ([]byte, Error) {
|
||||||
|
// Return slice
|
||||||
|
ret := make([]byte, 0)
|
||||||
|
|
||||||
|
// Read buffer
|
||||||
|
buf := make([]byte, fileReadBufSize)
|
||||||
|
|
||||||
|
// Read through file until null bytes / error
|
||||||
|
for {
|
||||||
|
count, err := fd.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return nil, WrapError(FileReadErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = append(ret, buf[:count]...)
|
||||||
|
|
||||||
|
if count < fileReadBufSize {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScanFile scans a supplied file at file descriptor, using iterator function
|
||||||
|
func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Error {
|
||||||
|
// Buffered reader
|
||||||
|
rdr := bufio.NewReaderSize(fd, fileReadBufSize)
|
||||||
|
|
||||||
|
// Iterate through file!
|
||||||
|
for {
|
||||||
|
// Line buffer
|
||||||
|
b := make([]byte, 0)
|
||||||
|
|
||||||
|
// Read until line-end, or file end!
|
||||||
|
for {
|
||||||
|
// Read a line
|
||||||
|
line, isPrefix, err := rdr.ReadLine()
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return WrapError(FileReadErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append to line buffer
|
||||||
|
b = append(b, line...)
|
||||||
|
|
||||||
|
// If not isPrefix, we can break-out
|
||||||
|
if !isPrefix {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run scan iterator on this line, break-out if requested
|
||||||
|
if !iterator(string(b)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScanDirectory reads the contents of a directory and performs the iterator function on each os.FileInfo entry returned
|
||||||
|
func (fs *FileSystemObject) ScanDirectory(fd *os.File, p *Path, iterator func(os.FileInfo, *Path)) Error {
|
||||||
|
dirList, err := fd.Readdir(-1)
|
||||||
|
if err != nil {
|
||||||
|
return WrapError(DirectoryReadErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort by name
|
||||||
|
sort.Sort(byName(dirList))
|
||||||
|
|
||||||
|
// Walk through the directory list using supplied iterator function
|
||||||
|
for _, info := range dirList {
|
||||||
|
// Make new Path object
|
||||||
|
fp := p.JoinPath(info.Name())
|
||||||
|
|
||||||
|
// Skip restricted files
|
||||||
|
if IsRestrictedPath(fp) || WithinCGIDir(fp) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform iterator
|
||||||
|
iterator(info, p.JoinPath(info.Name()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddGeneratedFile adds a generated file content byte slice to the file cache, with supplied path as the key
|
||||||
|
func (fs *FileSystemObject) AddGeneratedFile(p *Path, b []byte) {
|
||||||
|
// Get write lock, defer unlock
|
||||||
|
fs.Lock()
|
||||||
|
defer fs.Unlock()
|
||||||
|
|
||||||
|
// Create new generatedFileContents
|
||||||
|
contents := &generatedFileContents{b}
|
||||||
|
|
||||||
|
// Wrap contents in File
|
||||||
|
file := newFile(contents)
|
||||||
|
|
||||||
|
// Add to cache!
|
||||||
|
fs.cache.Put(p.Absolute(), file)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleClient handles a Client, attempting to serve their request from the filesystem whether a regular file, gophermap, dir listing or CGI script
|
||||||
|
func (fs *FileSystemObject) HandleClient(client *Client, request *Request, newFileContents func(*Path) FileContents, handleDirectory func(*FileSystemObject, *Client, *os.File, *Path) Error) Error {
|
||||||
|
// If restricted, return error
|
||||||
|
if IsRestrictedPath(request.Path()) {
|
||||||
|
return NewError(RestrictedPathErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try remap request, log if so
|
||||||
|
ok := RemapRequest(request)
|
||||||
|
if ok {
|
||||||
|
client.LogInfo(requestRemappedStr, request.Path().Selector(), request.Params())
|
||||||
|
}
|
||||||
|
|
||||||
|
// First check for file on disk
|
||||||
|
fd, err := fs.OpenFile(request.Path())
|
||||||
|
if err != nil {
|
||||||
|
// Get read-lock, defer unlock
|
||||||
|
fs.RLock()
|
||||||
|
defer fs.RUnlock()
|
||||||
|
|
||||||
|
// Don't throw in the towel yet! Check for generated file in cache
|
||||||
|
file, ok := fs.cache.Get(request.Path().Absolute())
|
||||||
|
if !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We got a generated file! Close and send as-is
|
||||||
|
return file.WriteToClient(client, request.Path())
|
||||||
|
}
|
||||||
|
defer fd.Close()
|
||||||
|
|
||||||
|
// Get stat
|
||||||
|
stat, goErr := fd.Stat()
|
||||||
|
if goErr != nil {
|
||||||
|
// Unlock, return error
|
||||||
|
fs.RUnlock()
|
||||||
|
return WrapError(FileStatErr, goErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
// Directory
|
||||||
|
case stat.Mode()&os.ModeDir != 0:
|
||||||
|
// Don't support CGI script dir enumeration
|
||||||
|
if WithinCGIDir(request.Path()) {
|
||||||
|
return NewError(RestrictedPathErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Else enumerate dir
|
||||||
|
return handleDirectory(fs, client, fd, request.Path())
|
||||||
|
|
||||||
|
// Regular file
|
||||||
|
case stat.Mode()&os.ModeType == 0:
|
||||||
|
// Execute script if within CGI dir
|
||||||
|
if WithinCGIDir(request.Path()) {
|
||||||
|
return ExecuteCGIScript(client, request)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Else just fetch
|
||||||
|
return fs.FetchFile(client, fd, stat, request.Path(), newFileContents)
|
||||||
|
|
||||||
|
// Unsupported type
|
||||||
|
default:
|
||||||
|
return NewError(FileTypeErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchFile attempts to fetch a file from the cache, using the supplied file stat, Path and serving client. Returns Error status.
// Oversized files bypass the cache entirely and stream straight from disk.
func (fs *FileSystemObject) FetchFile(client *Client, fd *os.File, stat os.FileInfo, p *Path, newFileContents func(*Path) FileContents) Error {
	// If file too big to cache, write direct to client from the open descriptor
	if stat.Size() > fileSizeMax {
		return client.Conn().WriteFrom(fd)
	}

	// Get cache read lock, defer unlock (covers all return paths; the
	// lock-swapping below always re-acquires the read lock before returning)
	fs.RLock()
	defer fs.RUnlock()

	// Now check for the file in cache
	f, ok := fs.cache.Get(p.Absolute())
	if !ok {
		// Cache miss: build new contents via the supplied constructor
		contents := newFileContents(p)

		// Wrap contents in a cacheable file entry
		f = newFile(contents)

		// Load the file contents into the entry (marks it fresh)
		err := f.CacheContents(fd, p)
		if err != nil {
			// Deferred RUnlock releases the read lock; return error
			return err
		}

		// Swap the cache read lock for the write lock to insert the entry.
		// NOTE(review): between RUnlock and Lock another goroutine may insert
		// the same path; Put would then overwrite it — presumably acceptable
		// here (same contents), but confirm lruCacheMap semantics.
		fs.RUnlock()
		fs.Lock()

		// Put file in cache
		fs.cache.Put(p.Absolute(), f)

		// Switch back to cache read lock (satisfies the deferred RUnlock),
		// then take the file's own read lock for serving
		fs.Unlock()
		fs.RLock()
		f.RLock()
	} else {
		// Cache hit: take the file's read lock before checking freshness
		f.RLock()

		// Check for file freshness
		if !f.IsFresh() {
			// Stale: upgrade to the file's write lock to refresh it
			f.RUnlock()
			f.Lock()

			// Refresh file contents from the open descriptor
			err := f.CacheContents(fd, p)
			if err != nil {
				// Release the file write lock, return error
				f.Unlock()
				return err
			}

			// Done refreshing — downgrade back to the file read lock
			f.Unlock()
			f.RLock()
		}
	}

	// Release the file read lock after writing its contents to the client
	defer f.RUnlock()
	return f.WriteToClient(client, p)
}
|
@ -0,0 +1,18 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
var (
	// Root stores the server's root directory
	Root string

	// BindAddr stores the server's bound IP
	BindAddr string

	// Hostname stores the host's outward-facing hostname
	Hostname string

	// Port stores the internal port the server is bound to
	Port string

	// FwdPort stores the host's outward-facing (forwarded) port number
	FwdPort string
)
|
@ -0,0 +1,37 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import "net"
|
||||||
|
|
||||||
|
// serverListener holds the global listener object
var serverListener *listener

// listener wraps a net.TCPListener so that Accept() returns our own Client type
type listener struct {
	l *net.TCPListener
}
|
||||||
|
|
||||||
|
// NewListener returns a new Listener or Error
|
||||||
|
func newListener(ip, port string) (*listener, Error) {
|
||||||
|
// Try resolve provided ip and port details
|
||||||
|
laddr, err := net.ResolveTCPAddr("tcp", ip+":"+port)
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(ListenerResolveErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create listener!
|
||||||
|
l, err := net.ListenTCP("tcp", laddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(ListenerBeginErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &listener{l}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept accepts a new connection and returns a client, or error
|
||||||
|
func (l *listener) Accept() (*Client, Error) {
|
||||||
|
conn, err := l.l.AcceptTCP()
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(ListenerAcceptErr, err)
|
||||||
|
}
|
||||||
|
return NewClient(conn), nil
|
||||||
|
}
|
@ -0,0 +1,92 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// AccessLog holds the global access logger implementation
	AccessLog loggerInterface

	// SystemLog holds the global system logger implementation
	SystemLog loggerInterface
)
|
||||||
|
|
||||||
|
func setupLogger(output string) loggerInterface {
|
||||||
|
switch output {
|
||||||
|
case "stdout":
|
||||||
|
return &stdLogger{}
|
||||||
|
case "null":
|
||||||
|
return &nullLogger{}
|
||||||
|
default:
|
||||||
|
fd, err := os.OpenFile(output, os.O_CREATE|os.O_APPEND, 0600)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf(logOutputErrStr, output, err.Error())
|
||||||
|
}
|
||||||
|
return &logger{log.New(fd, "", log.LstdFlags)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loggerInterface specifies an interface that can log different message levels.
type loggerInterface interface {
	// Info logs a formatted informational message
	Info(string, ...interface{})

	// Error logs a formatted error message
	Error(string, ...interface{})

	// Fatal logs a formatted message and then terminates the program
	Fatal(string, ...interface{})
}
|
||||||
|
|
||||||
|
// stdLogger implements loggerInterface on top of the default standard-library
// logger (parameter renamed from "fmt", which shadowed the stdlib package name).
type stdLogger struct{}

// Info logs with the info-level prefix.
func (l *stdLogger) Info(format string, args ...interface{}) {
	log.Printf(":: I :: "+format, args...)
}

// Error logs with the error-level prefix.
func (l *stdLogger) Error(format string, args ...interface{}) {
	log.Printf(":: E :: "+format, args...)
}

// Fatal logs with the fatal prefix and terminates the program.
func (l *stdLogger) Fatal(format string, args ...interface{}) {
	log.Fatalf(":: F :: "+format, args...)
}
|
||||||
|
|
||||||
|
// logger implements loggerInterface on top of a dedicated log.Logger
// (typically backed by a log file).
type logger struct {
	logger *log.Logger
}

// Info logs with the info-level prefix.
func (l *logger) Info(format string, args ...interface{}) {
	l.logger.Printf("I :: "+format, args...)
}

// Error logs with the error-level prefix.
func (l *logger) Error(format string, args ...interface{}) {
	l.logger.Printf("E :: "+format, args...)
}

// Fatal logs with the fatal prefix and terminates the program.
func (l *logger) Fatal(format string, args ...interface{}) {
	l.logger.Fatalf("F :: "+format, args...)
}
|
||||||
|
|
||||||
|
// nullLogger implements loggerInterface by discarding all messages.
type nullLogger struct{}

// Info discards the message.
func (l *nullLogger) Info(format string, args ...interface{}) {}

// Error discards the message.
func (l *nullLogger) Error(format string, args ...interface{}) {}

// Fatal logs nothing but still terminates the program.
func (l *nullLogger) Fatal(format string, args ...interface{}) {
	os.Exit(1)
}
|
@ -0,0 +1,118 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Path safely holds a file path split into its server root, the path
// relative to that root, and the client-facing selector form.
type Path struct {
	root string // root dir
	rel  string // relative path (under root)
	sel  string // selector path (client-facing, always '/'-prefixed)
}
|
||||||
|
|
||||||
|
// NewPath returns a new Path structure based on supplied root and relative path
|
||||||
|
func NewPath(root, rel string) *Path {
|
||||||
|
return &Path{root, rel, formatSelector(rel)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSanitizedPath returns a new sanitized Path structure based on root and relative path
|
||||||
|
func newSanitizedPath(root, rel string) *Path {
|
||||||
|
return NewPath(root, sanitizeRawPath(root, rel))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remap remaps a Path to a new (sanitized) relative path, keeping the previous selector.
func (p *Path) Remap(newRel string) {
	p.rel = sanitizeRawPath(p.root, newRel)
}

// Root returns the file's root directory.
func (p *Path) Root() string {
	return p.root
}

// Relative returns the relative path.
func (p *Path) Relative() string {
	return p.rel
}

// Absolute returns the absolute path (root joined with relative).
func (p *Path) Absolute() string {
	return path.Join(p.root, p.rel)
}

// Selector returns the formatted client-facing selector path.
func (p *Path) Selector() string {
	return p.sel
}

// RelativeDir returns the residing dir of the relative path.
func (p *Path) RelativeDir() string {
	return path.Dir(p.rel)
}

// SelectorDir returns the residing dir of the selector path.
func (p *Path) SelectorDir() string {
	return path.Dir(p.sel)
}

// Dir returns a Path object at the residing dir of the calling object
// (keeping the separate selector intact).
func (p *Path) Dir() *Path {
	return &Path{p.root, p.RelativeDir(), p.SelectorDir()}
}

// JoinRelative returns the supplied string joined onto the relative path.
func (p *Path) JoinRelative(newRel string) string {
	return path.Join(p.rel, newRel)
}

// JoinPath returns a new Path with toJoin appended to both the relative and
// selector paths.
func (p *Path) JoinPath(toJoin string) *Path {
	return &Path{p.root, path.Join(p.rel, toJoin), path.Join(p.sel, toJoin)}
}
|
||||||
|
|
||||||
|
// formatSelector converts a relative path into a valid selector path:
// empty and "." become "/", and anything else gains a leading "/" if missing.
func formatSelector(rel string) string {
	if len(rel) == 0 {
		return "/"
	}
	if len(rel) == 1 {
		if rel == "." {
			return "/"
		}
		return "/" + rel
	}
	if strings.HasPrefix(rel, "/") {
		return rel
	}
	return "/" + rel
}
|
||||||
|
|
||||||
|
// sanitizeRawPath cleans a raw relative path against the given root:
// absolute paths are rebased under root, and any upward ("..") traversal
// collapses to the server root.
func sanitizeRawPath(root, rel string) string {
	rel = path.Clean(rel)

	switch {
	case path.IsAbs(rel):
		// Absolute: strip the root prefix and any leading '/'
		rel = strings.TrimPrefix(strings.TrimPrefix(rel, root), "/")
	case strings.HasPrefix(rel, ".."):
		// Back-dir traversal: hand back the server root instead
		rel = ""
	}

	return rel
}
|
||||||
|
|
||||||
|
// sanitizerUserRoot takes a generated user root directory and sanitizes it, returning a bool as to whether it's safe
|
||||||
|
func sanitizeUserRoot(root string) (string, bool) {
|
||||||
|
root = path.Clean(root)
|
||||||
|
if !strings.HasPrefix(root, "/home/") && strings.HasSuffix(root, "/"+userDir) {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return root, true
|
||||||
|
}
|
@ -0,0 +1,159 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// cgiDirRegex is a precompiled regex matching paths inside the server's CGI directory
	cgiDirRegex *regexp.Regexp

	// WithinCGIDir reports whether a path is within the server's CGI scripts directory
	WithinCGIDir func(*Path) bool

	// restrictedPaths is the global slice of compiled restricted-path regexes
	restrictedPaths []*regexp.Regexp

	// IsRestrictedPath is the global function to check a path against the restricted set
	IsRestrictedPath func(*Path) bool

	// requestRemaps is the global slice of compiled request remap rules
	requestRemaps []*RequestRemap

	// RemapRequest is the global function that tries to remap a request
	RemapRequest func(*Request) bool
)

// requestRemapSeparatorStr is the separator recognised in remap config lines ("alias -> template")
const requestRemapSeparatorStr = " -> "

// RequestRemap holds a remap regex to match against, and the template the
// matched request is rewritten with.
type RequestRemap struct {
	Regex    *regexp.Regexp
	Template string
}
|
||||||
|
|
||||||
|
// compileCGIRegex takes a supplied string and returns compiled regular expression
|
||||||
|
func compileCGIRegex(cgiDir string) *regexp.Regexp {
|
||||||
|
if path.IsAbs(cgiDir) {
|
||||||
|
if !strings.HasPrefix(cgiDir, Root) {
|
||||||
|
SystemLog.Fatal(cgiDirOutsideRootStr)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cgiDir = path.Join(Root, cgiDir)
|
||||||
|
}
|
||||||
|
SystemLog.Info(cgiDirStr, cgiDir)
|
||||||
|
return regexp.MustCompile("(?m)" + cgiDir + "(|/.*)$")
|
||||||
|
}
|
||||||
|
|
||||||
|
// compileRestrictedPathsRegex turns a string of restricted paths into a slice of compiled regular expressions
|
||||||
|
func compileRestrictedPathsRegex(restrictions string) []*regexp.Regexp {
|
||||||
|
regexes := make([]*regexp.Regexp, 0)
|
||||||
|
|
||||||
|
// Split restrictions string by new lines
|
||||||
|
for _, expr := range strings.Split(restrictions, "\n") {
|
||||||
|
// Skip empty expressions
|
||||||
|
if len(expr) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile the regular expression
|
||||||
|
regex, err := regexp.Compile("(?m)" + expr + "$")
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Fatal(pathRestrictRegexCompileFailStr, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append compiled regex and log
|
||||||
|
regexes = append(regexes, regex)
|
||||||
|
SystemLog.Info(pathRestrictRegexCompiledStr, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return regexes
|
||||||
|
}
|
||||||
|
|
||||||
|
// compil RequestRemapRegex turns a string of remapped paths into a slice of compiled RequestRemap structures
|
||||||
|
func compileRequestRemapRegex(remaps string) []*RequestRemap {
|
||||||
|
requestRemaps := make([]*RequestRemap, 0)
|
||||||
|
|
||||||
|
// Split remaps string by new lines
|
||||||
|
for _, expr := range strings.Split(remaps, "\n") {
|
||||||
|
// Skip empty expressions
|
||||||
|
if len(expr) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split into alias and remap
|
||||||
|
split := strings.Split(expr, requestRemapSeparatorStr)
|
||||||
|
if len(split) != 2 {
|
||||||
|
SystemLog.Fatal(requestRemapRegexInvalidStr, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile the regular expression
|
||||||
|
regex, err := regexp.Compile("(?m)" + strings.TrimPrefix(split[0], "/") + "$")
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Fatal(requestRemapRegexCompileFailStr, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append RequestRemap and log
|
||||||
|
requestRemaps = append(requestRemaps, &RequestRemap{regex, strings.TrimPrefix(split[1], "/")})
|
||||||
|
SystemLog.Info(requestRemapRegexCompiledStr, expr)
|
||||||
|
}
|
||||||
|
|
||||||
|
return requestRemaps
|
||||||
|
}
|
||||||
|
|
||||||
|
// withinCGIDirEnabled returns whether a Path's absolute value matches within the CGI dir
|
||||||
|
func withinCGIDirEnabled(p *Path) bool {
|
||||||
|
return cgiDirRegex.MatchString(p.Absolute())
|
||||||
|
}
|
||||||
|
|
||||||
|
// withinCGIDirDisabled always returns false, CGI is disabled
|
||||||
|
func withinCGIDirDisabled(p *Path) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRestrictedPathEnabled returns whether a Path's relative value is restricted
|
||||||
|
func isRestrictedPathEnabled(p *Path) bool {
|
||||||
|
for _, regex := range restrictedPaths {
|
||||||
|
if regex.MatchString(p.Relative()) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRestrictedPathDisabled always returns false, there are no restricted paths
|
||||||
|
func isRestrictedPathDisabled(p *Path) bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// remapRequestEnabled tries to remap a request, returning bool as to success.
//
// Each configured RequestRemap is tried in order against the request's
// selector; the first regex that matches has its template expanded with the
// captured submatches to produce the new raw "path?params" string, and the
// request is rewritten in place.
func remapRequestEnabled(request *Request) bool {
	for _, remap := range requestRemaps {
		// No match, gotta keep looking
		if !remap.Regex.MatchString(request.Path().Selector()) {
			continue
		}

		// Create new request from template and submatches
		// (ExpandString appends each expansion into raw)
		raw := make([]byte, 0)
		for _, submatches := range remap.Regex.FindAllStringSubmatchIndex(request.Path().Selector(), -1) {
			raw = remap.Regex.ExpandString(raw, remap.Template, request.Path().Selector(), submatches)
		}

		// Split to new path and parameters again
		path, params := splitBy(string(raw), "?")

		// Remap request, log, return
		request.Remap(path, params)
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// remapRequestDisabled always returns false, there are no remapped requests.
// Installed as the RemapRequest function when no remaps are configured.
func remapRequestDisabled(request *Request) bool {
	return false
}
|
@ -0,0 +1,25 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
// Request is a data structure for storing a filesystem path, and params, parsed from a client's request.
type Request struct {
	p      *Path  // parsed filesystem path for the request
	params string // raw parameter string (query/search portion after '?')
}
|
||||||
|
|
||||||
|
// Path returns the request's associated Path object.
func (r *Request) Path() *Path {
	return r.p
}
|
||||||
|
|
||||||
|
// Params returns the request's parameters string (may be empty).
func (r *Request) Params() string {
	return r.params
}
|
||||||
|
|
||||||
|
// Remap modifies a request to use new relative path, and accommodate supplied extra parameters
|
||||||
|
func (r *Request) Remap(rel, params string) {
|
||||||
|
if len(r.params) > 0 {
|
||||||
|
r.params = params + "&" + r.params
|
||||||
|
}
|
||||||
|
r.p.Remap(rel)
|
||||||
|
}
|
@ -0,0 +1,208 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Version holds the current version string
|
||||||
|
Version = "v0.3-alpha"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// SigChannel is the global OS signal channel
|
||||||
|
sigChannel chan os.Signal
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseFlagsAndSetup parses necessary core server flags, and sets up the core ready for Start() to be called
|
||||||
|
func ParseFlagsAndSetup(errorMessageFunc func(ErrorCode) string) {
|
||||||
|
// Setup numerous temporary flag variables, and store the rest
|
||||||
|
// directly in their final operating location. Strings are stored
|
||||||
|
// in `string_constants.go` to allow for later localization
|
||||||
|
sysLog := flag.String(sysLogFlagStr, "stdout", sysLogDescStr)
|
||||||
|
accLog := flag.String(accLogFlagStr, "stdout", accLogDescStr)
|
||||||
|
flag.StringVar(&Root, rootFlagStr, "/var/gopher", rootDescStr)
|
||||||
|
flag.StringVar(&BindAddr, bindAddrFlagStr, "", bindAddrDescStr)
|
||||||
|
flag.StringVar(&Hostname, hostnameFlagStr, "localhost", hostnameDescStr)
|
||||||
|
port := flag.Uint(portFlagStr, 70, portDescStr)
|
||||||
|
fwdPort := flag.Uint(fwdPortFlagStr, 0, fwdPortDescStr)
|
||||||
|
flag.DurationVar(&connReadDeadline, readDeadlineFlagStr, time.Duration(time.Second*3), readDeadlineDescStr)
|
||||||
|
flag.DurationVar(&connWriteDeadline, writeDeadlineFlagStr, time.Duration(time.Second*5), writeDeadlineDescStr)
|
||||||
|
cReadBuf := flag.Uint(connReadBufFlagStr, 1024, connReadBufDescStr)
|
||||||
|
cWriteBuf := flag.Uint(connWriteBufFlagStr, 1024, connWriteBufDescStr)
|
||||||
|
cReadMax := flag.Uint(connReadMaxFlagStr, 4096, connReadMaxDescStr)
|
||||||
|
fReadBuf := flag.Uint(fileReadBufFlagStr, 1024, fileReadBufDescStr)
|
||||||
|
flag.DurationVar(&monitorSleepTime, monitorSleepTimeFlagStr, time.Duration(time.Second*1), monitorSleepTimeDescStr)
|
||||||
|
cacheMax := flag.Float64(cacheFileMaxFlagStr, 1.0, cacheFileMaxDescStr)
|
||||||
|
cacheSize := flag.Uint(cacheSizeFlagStr, 100, cacheSizeDescStr)
|
||||||
|
restrictedPathsList := flag.String(restrictPathsFlagStr, "", restrictPathsDescStr)
|
||||||
|
remapRequestsList := flag.String(remapRequestsFlagStr, "", remapRequestsDescStr)
|
||||||
|
cgiDir := flag.String(cgiDirFlagStr, "", cgiDirDescStr)
|
||||||
|
flag.DurationVar(&maxCGIRunTime, maxCGITimeFlagStr, time.Duration(time.Second*3), maxCGITimeDescStr)
|
||||||
|
safePath := flag.String(safePathFlagStr, "/bin:/usr/bin", safePathDescStr)
|
||||||
|
httpCompatCGI := flag.Bool(httpCompatCGIFlagStr, false, httpCompatCGIDescStr)
|
||||||
|
httpPrefixBuf := flag.Uint(httpPrefixBufFlagStr, 1024, httpPrefixBufDescStr)
|
||||||
|
flag.StringVar(&userDir, userDirFlagStr, "", userDirDescStr)
|
||||||
|
printVersion := flag.Bool(versionFlagStr, false, versionDescStr)
|
||||||
|
|
||||||
|
// Parse flags! (including any set by outer calling function)
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
// If version print requested, do so!
|
||||||
|
if *printVersion {
|
||||||
|
fmt.Println("Gophor " + Version)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Setup loggers
|
||||||
|
SystemLog = setupLogger(*sysLog)
|
||||||
|
if sysLog == accLog {
|
||||||
|
AccessLog = SystemLog
|
||||||
|
} else {
|
||||||
|
AccessLog = setupLogger(*accLog)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check valid values for BindAddr and Hostname
|
||||||
|
if Hostname == "" {
|
||||||
|
if BindAddr == "" {
|
||||||
|
SystemLog.Fatal(hostnameBindAddrEmptyStr)
|
||||||
|
}
|
||||||
|
Hostname = BindAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Change to server directory
|
||||||
|
if osErr := os.Chdir(Root); osErr != nil {
|
||||||
|
SystemLog.Fatal(chDirErrStr, osErr)
|
||||||
|
}
|
||||||
|
SystemLog.Info(chDirStr, Root)
|
||||||
|
|
||||||
|
// Set port info
|
||||||
|
if *fwdPort == 0 {
|
||||||
|
fwdPort = port
|
||||||
|
}
|
||||||
|
Port = strconv.Itoa(int(*port))
|
||||||
|
FwdPort = strconv.Itoa(int(*fwdPort))
|
||||||
|
|
||||||
|
// Setup listener
|
||||||
|
var err Error
|
||||||
|
serverListener, err = newListener(BindAddr, Port)
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Fatal(listenerBeginFailStr, BindAddr, Port, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Host buffer sizes
|
||||||
|
connReadBufSize = int(*cReadBuf)
|
||||||
|
connWriteBufSize = int(*cWriteBuf)
|
||||||
|
connReadMax = int(*cReadMax)
|
||||||
|
fileReadBufSize = int(*fReadBuf)
|
||||||
|
|
||||||
|
// FileSystemObject (and related) setup
|
||||||
|
fileSizeMax = int64(1048576.0 * *cacheMax) // gets megabytes value in bytes
|
||||||
|
FileSystem = newFileSystemObject(int(*cacheSize))
|
||||||
|
|
||||||
|
// If no restricted files provided, set to the disabled function. Else, compile and enable
|
||||||
|
if *restrictedPathsList == "" {
|
||||||
|
SystemLog.Info(pathRestrictionsDisabledStr)
|
||||||
|
IsRestrictedPath = isRestrictedPathDisabled
|
||||||
|
} else {
|
||||||
|
SystemLog.Info(pathRestrictionsEnabledStr)
|
||||||
|
restrictedPaths = compileRestrictedPathsRegex(*restrictedPathsList)
|
||||||
|
IsRestrictedPath = isRestrictedPathEnabled
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no remapped files provided, set to the disabled function. Else, compile and enable
|
||||||
|
if *remapRequestsList == "" {
|
||||||
|
SystemLog.Info(requestRemapDisabledStr)
|
||||||
|
RemapRequest = remapRequestDisabled
|
||||||
|
} else {
|
||||||
|
SystemLog.Info(requestRemapEnabledStr)
|
||||||
|
requestRemaps = compileRequestRemapRegex(*remapRequestsList)
|
||||||
|
RemapRequest = remapRequestEnabled
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no CGI dir supplied, set to disabled function. Else, compile and enable
|
||||||
|
if *cgiDir == "" {
|
||||||
|
SystemLog.Info(cgiSupportDisabledStr)
|
||||||
|
WithinCGIDir = withinCGIDirDisabled
|
||||||
|
} else {
|
||||||
|
SystemLog.Info(cgiSupportEnabledStr)
|
||||||
|
cgiDirRegex = compileCGIRegex(*cgiDir)
|
||||||
|
cgiEnv = setupInitialCGIEnv(*safePath)
|
||||||
|
WithinCGIDir = withinCGIDirEnabled
|
||||||
|
|
||||||
|
// Enable HTTP compatible CGI scripts, or not
|
||||||
|
if *httpCompatCGI {
|
||||||
|
SystemLog.Info(cgiHTTPCompatEnabledStr, httpPrefixBuf)
|
||||||
|
ExecuteCGIScript = executeCGIScriptStripHTTP
|
||||||
|
httpPrefixBufSize = int(*httpPrefixBuf)
|
||||||
|
} else {
|
||||||
|
ExecuteCGIScript = executeCGIScriptNoHTTP
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no user dir supplied, set to disabled function. Else, set user dir and enable
|
||||||
|
if userDir == "" {
|
||||||
|
SystemLog.Info(userDirDisabledStr)
|
||||||
|
getRequestPath = getRequestPathUserDirDisabled
|
||||||
|
} else {
|
||||||
|
SystemLog.Info(userDirEnabledStr)
|
||||||
|
getRequestPath = getRequestPathUserDirEnabled
|
||||||
|
|
||||||
|
// Clean the user dir to be safe
|
||||||
|
userDir = path.Clean(userDir)
|
||||||
|
if strings.HasPrefix(userDir, "..") {
|
||||||
|
SystemLog.Fatal(userDirBackTraverseErrStr, userDir)
|
||||||
|
} else {
|
||||||
|
SystemLog.Info(userDirStr, userDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set ErrorCode->string function
|
||||||
|
getExtendedErrorMessage = errorMessageFunc
|
||||||
|
|
||||||
|
// Setup signal channel
|
||||||
|
sigChannel = make(chan os.Signal)
|
||||||
|
signal.Notify(sigChannel, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins operation of the server
|
||||||
|
func Start(serve func(*Client)) {
|
||||||
|
// Start the FileSystemObject cache freshness monitor
|
||||||
|
SystemLog.Info(cacheMonitorStartStr, monitorSleepTime)
|
||||||
|
go FileSystem.StartMonitor()
|
||||||
|
|
||||||
|
// Start the listener
|
||||||
|
SystemLog.Info(listeningOnStr, BindAddr, Port, Hostname, FwdPort)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
client, err := serverListener.Accept()
|
||||||
|
if err != nil {
|
||||||
|
SystemLog.Error(err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve client then close in separate goroutine
|
||||||
|
go func() {
|
||||||
|
serve(client)
|
||||||
|
client.Conn().Close()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Listen for OS signals and terminate if necessary
|
||||||
|
listenForOSSignals()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListenForOSSignals listens for OS signals and terminates the program if necessary
|
||||||
|
func listenForOSSignals() {
|
||||||
|
sig := <-sigChannel
|
||||||
|
SystemLog.Info(signalReceivedStr, sig)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
@ -0,0 +1,151 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
// Core flag string constants: each pair is a flag's name and its help text.
// Kept in one place to allow for later localization.
// BUGFIX: corrected the "documenation" typo in two user-facing help strings.
const (
	sysLogFlagStr = "sys-log"
	sysLogDescStr = "System log output location ['stdout', 'null', $filename]"

	accLogFlagStr = "acc-log"
	accLogDescStr = "Access log output location ['stdout', 'null', $filename]"

	rootFlagStr = "root"
	rootDescStr = "Server root directory"

	bindAddrFlagStr = "bind-addr"
	bindAddrDescStr = "IP address to bind to"

	hostnameFlagStr = "hostname"
	hostnameDescStr = "Server hostname (FQDN)"

	portFlagStr = "port"
	portDescStr = "Port to listen on"

	fwdPortFlagStr = "fwd-port"
	fwdPortDescStr = "Outward-facing port"

	readDeadlineFlagStr = "read-deadline"
	readDeadlineDescStr = "Connection read deadline (timeout)"

	writeDeadlineFlagStr = "write-deadline"
	writeDeadlineDescStr = "Connection write deadline (timeout)"

	connReadBufFlagStr = "conn-read-buf"
	connReadBufDescStr = "Connection read buffer size (bytes)"

	connWriteBufFlagStr = "conn-write-buf"
	connWriteBufDescStr = "Connection write buffer size (bytes)"

	connReadMaxFlagStr = "conn-read-max"
	connReadMaxDescStr = "Connection read max (bytes)"

	fileReadBufFlagStr = "file-read-buf"
	fileReadBufDescStr = "File read buffer size (bytes)"

	monitorSleepTimeFlagStr = "cache-monitor-freq"
	monitorSleepTimeDescStr = "File cache freshness monitor frequency"

	cacheFileMaxFlagStr = "cache-file-max"
	cacheFileMaxDescStr = "Max cached file size (megabytes)"

	cacheSizeFlagStr = "cache-size"
	cacheSizeDescStr = "File cache size"

	restrictPathsFlagStr = "restrict-paths"
	restrictPathsDescStr = "Restrict paths as new-line separated list of regex statements (see documentation)"

	remapRequestsFlagStr = "remap-requests"
	remapRequestsDescStr = "Remap requests as new-line separated list of remap statements (see documentation)"

	cgiDirFlagStr = "cgi-dir"
	cgiDirDescStr = "CGI scripts directory (empty to disable)"

	maxCGITimeFlagStr = "max-cgi-time"
	maxCGITimeDescStr = "Max CGI script execution time"

	safePathFlagStr = "safe-path"
	safePathDescStr = "CGI environment safe PATH variable"

	httpCompatCGIFlagStr = "http-compat-cgi"
	httpCompatCGIDescStr = "Enable HTTP compatibility for CGI scripts by stripping headers"

	httpPrefixBufFlagStr = "http-prefix-buf"
	httpPrefixBufDescStr = "Buffer size used for stripping HTTP headers"

	userDirFlagStr = "user-dir"
	userDirDescStr = "User's personal server directory"

	versionFlagStr = "version"
	versionDescStr = "Print version string"
)
|
||||||
|
|
||||||
|
// Log string constants: format strings and fixed messages used by the
// system/access loggers and by Error construction. Kept in one place
// to allow for later localization.
const (
	// Startup validation / setup messages
	hostnameBindAddrEmptyStr = "At least one of hostname or bind-addr must be non-empty!"

	chDirStr    = "Entered server dir: %s"
	chDirErrStr = "Error entering server directory: %s"

	listenerBeginFailStr = "Failed to start listener on %s:%s (%s)"
	listeningOnStr       = "Listening on: %s:%s (%s:%s)"

	cacheMonitorStartStr = "Starting cache monitor with freq: %s"

	// Path restriction configuration
	pathRestrictionsEnabledStr      = "Path restrictions enabled"
	pathRestrictionsDisabledStr     = "Path restrictions disabled"
	pathRestrictRegexCompileFailStr = "Failed compiling restricted path regex: %s"
	pathRestrictRegexCompiledStr    = "Compiled restricted path regex: %s"

	// Request remap configuration
	requestRemapEnabledStr          = "Request remapping enabled"
	requestRemapDisabledStr         = "Request remapping disabled"
	requestRemapRegexInvalidStr     = "Invalid request remap regex: %s"
	requestRemapRegexCompileFailStr = "Failed compiling request remap regex: %s"
	requestRemapRegexCompiledStr    = "Compiled path remap regex: %s"
	requestRemappedStr              = "Remapped request: %s %s"

	// CGI configuration and execution
	cgiSupportEnabledStr    = "CGI script support enabled"
	cgiSupportDisabledStr   = "CGI script support disabled"
	cgiDirOutsideRootStr    = "CGI directory must not be outside server root!"
	cgiDirStr               = "CGI directory: %s"
	cgiHTTPCompatEnabledStr = "CGI HTTP compatibility enabled, prefix buffer: %d"
	cgiExecuteErrStr        = "Exit executing: %s [%d]"

	// User directory configuration
	userDirEnabledStr         = "User directory support enabled"
	userDirDisabledStr        = "User directory support disabled"
	userDirBackTraverseErrStr = "User directory with back-traversal not supported: %s"
	userDirStr                = "User directory: %s"

	signalReceivedStr = "Signal received: %v. Shutting down..."

	logOutputErrStr = "Error opening log output %s: %s"

	// Process-group cleanup (CGI child processes)
	pgidNotFoundErrStr = "Process unfinished, PGID not found!"
	pgidStopErrStr     = "Error stopping process group %d: %s"

	// Short error descriptions used when wrapping an underlying error
	connWriteErrStr        = "Conn write error"
	connReadErrStr         = "Conn read error"
	connCloseErrStr        = "Conn close error"
	listenerResolveErrStr  = "Listener resolve error"
	listenerBeginErrStr    = "Listener begin error"
	listenerAcceptErrStr   = "Listener accept error"
	invalidIPErrStr        = "Invalid IP"
	invalidPortErrStr      = "Invalid port"
	fileOpenErrStr         = "File open error"
	fileStatErrStr         = "File stat error"
	fileReadErrStr         = "File read error"
	fileTypeErrStr         = "Unsupported file type"
	directoryReadErrStr    = "Directory read error"
	restrictedPathErrStr   = "Restricted path"
	invalidRequestErrStr   = "Invalid request"
	cgiStartErrStr         = "CGI start error"
	cgiExitCodeErrStr      = "CGI non-zero exit code"
	cgiStatus400ErrStr     = "CGI status: 400"
	cgiStatus401ErrStr     = "CGI status: 401"
	cgiStatus403ErrStr     = "CGI status: 403"
	cgiStatus404ErrStr     = "CGI status: 404"
	cgiStatus408ErrStr     = "CGI status: 408"
	cgiStatus410ErrStr     = "CGI status: 410"
	cgiStatus500ErrStr     = "CGI status: 500"
	cgiStatus501ErrStr     = "CGI status: 501"
	cgiStatus503ErrStr     = "CGI status: 503"
	cgiStatusUnknownErrStr = "CGI status: unknown"
)
|
@ -0,0 +1,75 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// getRequestPaths points to either of the getRequestPath____ functions
|
||||||
|
getRequestPath func(string) *Path
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseURLEncodedRequest takes a received string and safely parses a request from this
|
||||||
|
func ParseURLEncodedRequest(received string) (*Request, Error) {
|
||||||
|
// Check for ASCII control bytes
|
||||||
|
for i := 0; i < len(received); i++ {
|
||||||
|
if received[i] < ' ' || received[i] == 0x7f {
|
||||||
|
return nil, NewError(InvalidRequestErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split into 2 substrings by '?'. URL path and query
|
||||||
|
rawPath, params := splitBy(received, "?")
|
||||||
|
|
||||||
|
// Unescape path
|
||||||
|
rawPath, err := url.PathUnescape(rawPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, WrapError(InvalidRequestErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return new request
|
||||||
|
return &Request{getRequestPath(rawPath), params}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseInternalRequest parses an internal request string based on the current directory
|
||||||
|
func ParseInternalRequest(p *Path, line string) *Request {
|
||||||
|
rawPath, params := splitBy(line, "?")
|
||||||
|
if path.IsAbs(rawPath) {
|
||||||
|
return &Request{getRequestPath(rawPath), params}
|
||||||
|
}
|
||||||
|
return &Request{newSanitizedPath(p.Root(), rawPath), params}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getRequestPathUserDirEnabled creates a Path object from raw path, converting ~USER to user subdirectory roots, else at server root.
//
// A request of the form "/~user/rest" resolves under "/home/user/<userDir>";
// anything else resolves under the regular server root. Malformed user
// requests (bare "~", or a user root that fails sanitization) fall back to
// the server root.
func getRequestPathUserDirEnabled(rawPath string) *Path {
	if userPath := strings.TrimPrefix(rawPath, "/"); strings.HasPrefix(userPath, "~") {
		// We found a user path! Split into the user part, and remaining path
		user, remaining := splitBy(userPath, "/")

		// Empty user (just "~"), we been duped! Return server root
		if len(user) <= 1 {
			return &Path{Root, "", "/"}
		}

		// Get sanitized user root (user[1:] strips the '~'), else return server root
		root, ok := sanitizeUserRoot(path.Join("/home", user[1:], userDir))
		if !ok {
			return &Path{Root, "", "/"}
		}

		// Build new Path: relative part sanitized against the user root,
		// selector re-prefixed with "/~user" so links stay user-relative
		rel := sanitizeRawPath(root, remaining)
		sel := "/~" + user[1:] + formatSelector(rel)
		return &Path{root, rel, sel}
	}

	// Return regular server root + rawPath
	return newSanitizedPath(Root, rawPath)
}
|
||||||
|
|
||||||
|
// getRequestPathUserDirDisabled creates a Path object from raw path, always at server root.
// Installed as getRequestPath when user-dir support is off.
func getRequestPathUserDirDisabled(rawPath string) *Path {
	return newSanitizedPath(Root, rawPath)
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package core
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// byName and its associated functions provide a quick method of sorting FileInfos by name.
// Implements sort.Interface (Len/Less/Swap) for use with sort.Sort.
type byName []os.FileInfo

func (s byName) Len() int           { return len(s) }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
// splitBy takes an input string and a delimiter, returning the resulting two
// strings from splitting at the FIRST occurrence of the delimiter (ALWAYS 2).
// When the delimiter is absent the second return value is empty.
func splitBy(input, delim string) (string, string) {
	parts := strings.SplitN(input, delim, 2)
	if len(parts) < 2 {
		return parts[0], ""
	}
	return parts[0], parts[1]
}
|
@ -1,251 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Simple error code type defs */
type ErrorCode int
type ErrorResponseCode int

/* All codes share ONE const block so iota runs continuously across both
 * types, exactly matching the original explicit `= iota` on every line. */
const (
	/* Filesystem */
	PathEnumerationErr ErrorCode = iota
	IllegalPathErr
	FileStatErr
	FileOpenErr
	FileReadErr
	FileTypeErr
	DirListErr

	/* Sockets */
	SocketWriteErr
	SocketWriteRawErr

	/* Parsing */
	InvalidRequestErr
	EmptyItemTypeErr
	InvalidGophermapErr

	/* Executing */
	CommandStartErr
	CommandExitCodeErr
	CgiOutputErr
	CgiDisabledErr
	RestrictedCommandErr

	/* Wrapping CGI http status codes */
	CgiStatus400Err
	CgiStatus401Err
	CgiStatus403Err
	CgiStatus404Err
	CgiStatus408Err
	CgiStatus410Err
	CgiStatus500Err
	CgiStatus501Err
	CgiStatus503Err
	CgiStatusUnknownErr

	/* Error Response Codes (iota continues, preserving original values) */
	ErrorResponse200 ErrorResponseCode = iota
	ErrorResponse400
	ErrorResponse401
	ErrorResponse403
	ErrorResponse404
	ErrorResponse408
	ErrorResponse410
	ErrorResponse500
	ErrorResponse501
	ErrorResponse503
	NoResponse
)
|
|
||||||
|
|
||||||
/* Simple GophorError data structure to wrap another error */
type GophorError struct {
	Code ErrorCode // category of the failure (see ErrorCode consts)
	Err  error     // underlying wrapped error; may be nil
}
|
|
||||||
|
|
||||||
/* Convert error code to string */
|
|
||||||
func (e *GophorError) Error() string {
|
|
||||||
var str string
|
|
||||||
switch e.Code {
|
|
||||||
case PathEnumerationErr:
|
|
||||||
str = "path enumeration fail"
|
|
||||||
case IllegalPathErr:
|
|
||||||
str = "illegal path requested"
|
|
||||||
case FileStatErr:
|
|
||||||
str = "file stat fail"
|
|
||||||
case FileOpenErr:
|
|
||||||
str = "file open fail"
|
|
||||||
case FileReadErr:
|
|
||||||
str = "file read fail"
|
|
||||||
case FileTypeErr:
|
|
||||||
str = "invalid file type"
|
|
||||||
case DirListErr:
|
|
||||||
str = "directory read fail"
|
|
||||||
|
|
||||||
case SocketWriteErr:
|
|
||||||
str = "socket write error"
|
|
||||||
case SocketWriteRawErr:
|
|
||||||
str = "socket write readFrom error"
|
|
||||||
|
|
||||||
case InvalidRequestErr:
|
|
||||||
str = "invalid request data"
|
|
||||||
case InvalidGophermapErr:
|
|
||||||
str = "invalid gophermap"
|
|
||||||
|
|
||||||
case CommandStartErr:
|
|
||||||
str = "command start fail"
|
|
||||||
case CgiOutputErr:
|
|
||||||
str = "cgi output format error"
|
|
||||||
case CommandExitCodeErr:
|
|
||||||
str = "command exit code non-zero"
|
|
||||||
case CgiDisabledErr:
|
|
||||||
str = "ignoring /cgi-bin request, CGI disabled"
|
|
||||||
case RestrictedCommandErr:
|
|
||||||
str = "command use restricted"
|
|
||||||
|
|
||||||
case CgiStatus400Err:
|
|
||||||
str = "CGI script error status 400"
|
|
||||||
case CgiStatus401Err:
|
|
||||||
str = "CGI script error status 401"
|
|
||||||
case CgiStatus403Err:
|
|
||||||
str = "CGI script error status 403"
|
|
||||||
case CgiStatus404Err:
|
|
||||||
str = "CGI script error status 404"
|
|
||||||
case CgiStatus408Err:
|
|
||||||
str = "CGI script error status 408"
|
|
||||||
case CgiStatus410Err:
|
|
||||||
str = "CGI script error status 410"
|
|
||||||
case CgiStatus500Err:
|
|
||||||
str = "CGI script error status 500"
|
|
||||||
case CgiStatus501Err:
|
|
||||||
str = "CGI script error status 501"
|
|
||||||
case CgiStatus503Err:
|
|
||||||
str = "CGI script error status 503"
|
|
||||||
case CgiStatusUnknownErr:
|
|
||||||
str = "CGI script error unknown status code"
|
|
||||||
|
|
||||||
default:
|
|
||||||
str = "Unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.Err != nil {
|
|
||||||
return fmt.Sprintf("%s (%s)", str, e.Err.Error())
|
|
||||||
} else {
|
|
||||||
return fmt.Sprintf("%s", str)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Convert a gophor error code to appropriate error response code */
|
|
||||||
func gophorErrorToResponseCode(code ErrorCode) ErrorResponseCode {
|
|
||||||
switch code {
|
|
||||||
case PathEnumerationErr:
|
|
||||||
return ErrorResponse400
|
|
||||||
case IllegalPathErr:
|
|
||||||
return ErrorResponse403
|
|
||||||
case FileStatErr:
|
|
||||||
return ErrorResponse404
|
|
||||||
case FileOpenErr:
|
|
||||||
return ErrorResponse404
|
|
||||||
case FileReadErr:
|
|
||||||
return ErrorResponse404
|
|
||||||
case FileTypeErr:
|
|
||||||
/* If wrong file type, just assume file not there */
|
|
||||||
return ErrorResponse404
|
|
||||||
case DirListErr:
|
|
||||||
return ErrorResponse404
|
|
||||||
|
|
||||||
/* These are errors _while_ sending, no point trying to send error */
|
|
||||||
case SocketWriteErr:
|
|
||||||
return NoResponse
|
|
||||||
case SocketWriteRawErr:
|
|
||||||
return NoResponse
|
|
||||||
|
|
||||||
case InvalidRequestErr:
|
|
||||||
return ErrorResponse400
|
|
||||||
case InvalidGophermapErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
|
|
||||||
case CommandStartErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
case CommandExitCodeErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
case CgiOutputErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
case CgiDisabledErr:
|
|
||||||
return ErrorResponse404
|
|
||||||
case RestrictedCommandErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
|
|
||||||
case CgiStatus400Err:
|
|
||||||
return ErrorResponse400
|
|
||||||
case CgiStatus401Err:
|
|
||||||
return ErrorResponse401
|
|
||||||
case CgiStatus403Err:
|
|
||||||
return ErrorResponse403
|
|
||||||
case CgiStatus404Err:
|
|
||||||
return ErrorResponse404
|
|
||||||
case CgiStatus408Err:
|
|
||||||
return ErrorResponse408
|
|
||||||
case CgiStatus410Err:
|
|
||||||
return ErrorResponse410
|
|
||||||
case CgiStatus500Err:
|
|
||||||
return ErrorResponse500
|
|
||||||
case CgiStatus501Err:
|
|
||||||
return ErrorResponse501
|
|
||||||
case CgiStatus503Err:
|
|
||||||
return ErrorResponse503
|
|
||||||
case CgiStatusUnknownErr:
|
|
||||||
return ErrorResponse500
|
|
||||||
|
|
||||||
default:
|
|
||||||
return ErrorResponse503
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Generates gopher protocol compatible error response from our code */
|
|
||||||
func generateGopherErrorResponseFromCode(code ErrorCode) []byte {
|
|
||||||
responseCode := gophorErrorToResponseCode(code)
|
|
||||||
if responseCode == NoResponse {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return generateGopherErrorResponse(responseCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Generates gopher protocol compatible error response for response code */
|
|
||||||
func generateGopherErrorResponse(code ErrorResponseCode) []byte {
|
|
||||||
return buildErrorLine(code.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Error response code to string */
|
|
||||||
func (e ErrorResponseCode) String() string {
|
|
||||||
switch e {
|
|
||||||
case ErrorResponse200:
|
|
||||||
/* Should not have reached here */
|
|
||||||
Config.SysLog.Fatal("", "Passed error response 200 to error handler, SHOULD NOT HAVE DONE THIS\n")
|
|
||||||
return ""
|
|
||||||
case ErrorResponse400:
|
|
||||||
return "400 Bad Request"
|
|
||||||
case ErrorResponse401:
|
|
||||||
return "401 Unauthorised"
|
|
||||||
case ErrorResponse403:
|
|
||||||
return "403 Forbidden"
|
|
||||||
case ErrorResponse404:
|
|
||||||
return "404 Not Found"
|
|
||||||
case ErrorResponse408:
|
|
||||||
return "408 Request Time-out"
|
|
||||||
case ErrorResponse410:
|
|
||||||
return "410 Gone"
|
|
||||||
case ErrorResponse500:
|
|
||||||
return "500 Internal Server Error"
|
|
||||||
case ErrorResponse501:
|
|
||||||
return "501 Not Implemented"
|
|
||||||
case ErrorResponse503:
|
|
||||||
return "503 Service Unavailable"
|
|
||||||
default:
|
|
||||||
/* Should not have reached here */
|
|
||||||
Config.SysLog.Fatal("", "Unhandled ErrorResponseCode type\n")
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,160 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os/exec"
|
|
||||||
"syscall"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Setup initial (i.e. constant) gophermap / command environment variables.
 * Only PATH is passed through, keeping the exec environment minimal. */
func setupExecEnviron(path string) []string {
	return []string{
		envKeyValue("PATH", path),
	}
}
|
|
||||||
|
|
||||||
/* Setup initial (i.e. constant) CGI environment variables.
 * These are the request-independent entries; per-request entries are added
 * later by generateCgiEnvironment. */
func setupInitialCgiEnviron(path, charset string) []string {
	return []string{
		/* RFC 3875 standard */
		envKeyValue("GATEWAY_INTERFACE", "CGI/1.1"),             /* MUST be set to the dialect of CGI being used by the server */
		envKeyValue("SERVER_SOFTWARE", "gophor/"+GophorVersion), /* MUST be set to name and version of server software serving this request */
		envKeyValue("SERVER_PROTOCOL", "gopher"),                /* MUST be set to name and version of application protocol used for this request */
		envKeyValue("CONTENT_LENGTH", "0"),                      /* Contains size of message-body attached (always 0 so we set here) */
		envKeyValue("REQUEST_METHOD", "GET"),                    /* MUST be set to method by which script should process request. Always GET */

		/* Non-standard */
		envKeyValue("PATH", path),
		envKeyValue("COLUMNS", strconv.Itoa(Config.PageWidth)),
		envKeyValue("GOPHER_CHARSET", charset),
	}
}
|
|
||||||
|
|
||||||
/* Generate CGI environment: the constant entries from Config.CgiEnv plus
 * the per-request/per-connection entries derived from the responder. */
func generateCgiEnvironment(responder *Responder) []string {
	/* Get initial CgiEnv variables */
	env := Config.CgiEnv

	env = append(env, envKeyValue("SERVER_NAME", responder.Host.Name()))                 /* MUST be set to name of server host client is connecting to */
	env = append(env, envKeyValue("SERVER_PORT", responder.Host.Port()))                 /* MUST be set to the server port that client is connecting to */
	env = append(env, envKeyValue("REMOTE_ADDR", responder.Client.Ip()))                 /* Remote client addr, MUST be set */
	env = append(env, envKeyValue("QUERY_STRING", responder.Request.Parameters))         /* URL encoded search or parameter string, MUST be set even if empty */
	env = append(env, envKeyValue("SCRIPT_NAME", "/"+responder.Request.Path.Relative())) /* URI path (not URL encoded) which could identify the CGI script (rather than script's output) */
	env = append(env, envKeyValue("SCRIPT_FILENAME", responder.Request.Path.Absolute())) /* Basically SCRIPT_NAME absolute path */
	env = append(env, envKeyValue("SELECTOR", responder.Request.Path.Selector()))
	env = append(env, envKeyValue("DOCUMENT_ROOT", responder.Request.Path.RootDir()))
	env = append(env, envKeyValue("REQUEST_URI", "/"+responder.Request.Path.Relative()+responder.Request.Parameters))

	return env
}
|
|
||||||
|
|
||||||
/* Execute a CGI script (pointer to correct function).
 * Presumably assigned during server setup to either executeCgiNoHttp or
 * executeCgiStripHttp depending on configuration -- confirm in startup code.
 */
var executeCgi func(*Responder) *GophorError
|
|
||||||
|
|
||||||
/* Execute CGI script and serve as-is */
|
|
||||||
func executeCgiNoHttp(responder *Responder) *GophorError {
|
|
||||||
return execute(responder.Conn, generateCgiEnvironment(responder), responder.Request.Path.Absolute())
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Execute CGI script and strip HTTP headers */
|
|
||||||
func executeCgiStripHttp(responder *Responder) *GophorError {
|
|
||||||
/* HTTP header stripping writer that also parses HTTP status codes */
|
|
||||||
httpStripWriter := NewHttpStripWriter(responder.Conn)
|
|
||||||
|
|
||||||
/* Execute the CGI script using the new httpStripWriter */
|
|
||||||
gophorErr := execute(httpStripWriter, generateCgiEnvironment(responder), responder.Request.Path.Absolute())
|
|
||||||
|
|
||||||
/* httpStripWriter's error takes priority as it might have parsed the status code */
|
|
||||||
cgiStatusErr := httpStripWriter.FinishUp()
|
|
||||||
if cgiStatusErr != nil {
|
|
||||||
return cgiStatusErr
|
|
||||||
} else {
|
|
||||||
return gophorErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Execute any file (though only allowed are gophermaps) */
|
|
||||||
func executeFile(responder *Responder) *GophorError {
|
|
||||||
return execute(responder.Conn, Config.Env, responder.Request.Path.Absolute())
|
|
||||||
}
|
|
||||||
|
|
||||||
/* execute runs the executable at path with the supplied environment,
 * streaming its stdout to writer. The child is placed in its own process
 * group so it (and any children) can be killed as a unit if it exceeds
 * Config.MaxExecRunTime. Returns a GophorError on start failure, non-zero
 * exit, or when CGI execution is disabled.
 */
func execute(writer io.Writer, env []string, path string) *GophorError {
	/* If CGI disabled, just return error */
	if !Config.CgiEnabled {
		return &GophorError{ CgiDisabledErr, nil }
	}

	/* Setup command (no arguments are passed) */
	cmd := exec.Command(path)

	/* Set new process group id, so the whole group can be signalled below */
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

	/* Setup cmd env */
	cmd.Env = env

	/* Stream child stdout straight to the supplied writer */
	cmd.Stdout = writer

	/* Start executing! */
	err := cmd.Start()
	if err != nil {
		return &GophorError{ CommandStartErr, err }
	}

	/* Timer goroutine: kill the process group if still running after
	 * MaxExecRunTime.
	 * NOTE(review): this goroutine always sleeps the full MaxExecRunTime
	 * even when the command finishes early (one lingering goroutine per
	 * exec), and reading cmd.ProcessState while Wait() may be running is
	 * racy -- confirm whether this is acceptable before changing.
	 */
	go func() {
		time.Sleep(Config.MaxExecRunTime)

		if cmd.ProcessState != nil {
			/* We've already finished */
			return
		}

		/* Get process group id */
		pgid, err := syscall.Getpgid(cmd.Process.Pid)
		if err != nil {
			Config.SysLog.Fatal("", "Process unfinished, PGID not found!\n")
		}

		/* Kill process group! (negative pgid signals the whole group) */
		err = syscall.Kill(-pgid, syscall.SIGTERM)
		if err != nil {
			Config.SysLog.Fatal("", "Error stopping process group %d: %s\n", pgid, err.Error())
		}
	}()

	/* Wait for command to finish, get exit code */
	err = cmd.Wait()
	exitCode := 0
	if err != nil {
		/* Error, try to get exit code */
		exitError, ok := err.(*exec.ExitError)
		if ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			exitCode = waitStatus.ExitStatus()
		} else {
			/* Not an ExitError (e.g. I/O failure) -- treat as generic failure */
			exitCode = 1
		}
	} else {
		/* No error! Get exit code direct from command */
		waitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus)
		exitCode = waitStatus.ExitStatus()
	}

	if exitCode != 0 {
		/* If non-zero exit code return error */
		Config.SysLog.Error("", "Error executing: %s\n", path)
		return &GophorError{ CommandExitCodeErr, err }
	} else {
		return nil
	}
}
|
|
||||||
|
|
||||||
// envKeyValue formats a single environment entry as "KEY=VALUE".
func envKeyValue(key, value string) string {
	entry := key + "=" + value
	return entry
}
|
|
@ -1,353 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"bufio"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* FileContents provides an adaptable implementation for holding onto
 * some level of information about the contents of a file.
 */
type FileContents interface {
	Render(*Responder) *GophorError /* write held contents to the client via responder */
	Load() *GophorError             /* (re)load contents into memory */
	Clear()                         /* drop held contents */
}
|
|
||||||
|
|
||||||
type GeneratedFileContents struct {
|
|
||||||
/* Super simple, holds onto a slice of bytes */
|
|
||||||
|
|
||||||
Contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *GeneratedFileContents) Render(responder *Responder) *GophorError {
|
|
||||||
return responder.WriteData(fc.Contents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *GeneratedFileContents) Load() *GophorError {
|
|
||||||
/* do nothing */
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *GeneratedFileContents) Clear() {
|
|
||||||
/* do nothing */
|
|
||||||
}
|
|
||||||
|
|
||||||
type RegularFileContents struct {
|
|
||||||
/* Simple implemention that holds onto a RequestPath
|
|
||||||
* and slice containing cache'd content
|
|
||||||
*/
|
|
||||||
|
|
||||||
Path *RequestPath
|
|
||||||
Contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *RegularFileContents) Render(responder *Responder) *GophorError {
|
|
||||||
return responder.WriteData(fc.Contents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *RegularFileContents) Load() *GophorError {
|
|
||||||
/* Load the file into memory */
|
|
||||||
var gophorErr *GophorError
|
|
||||||
fc.Contents, gophorErr = bufferedRead(fc.Path.Absolute())
|
|
||||||
return gophorErr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fc *RegularFileContents) Clear() {
|
|
||||||
fc.Contents = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/* GophermapContents holds onto a request and the individually renderable
 * sections of its parsed gophermap.
 */
type GophermapContents struct {
	Request  *Request
	Sections []GophermapSection
}

/* Render sends each parsed gophermap section in turn, then finishes with
 * the configured footer text (which includes the gopher last-line).
 * Any section failure aborts and invalidates the whole gophermap.
 */
func (gc *GophermapContents) Render(responder *Responder) *GophorError {
	/* Render and send each of the gophermap sections */
	var gophorErr *GophorError
	for _, line := range gc.Sections {
		gophorErr = line.Render(responder)
		if gophorErr != nil {
			Config.SysLog.Error("", "Error executing gophermap contents: %s\n", gophorErr.Error())
			return &GophorError{ InvalidGophermapErr, gophorErr }
		}
	}

	/* End on footer text (including lastline) */
	return responder.WriteData(Config.FooterText)
}

/* Load parses the gophermap on disk into renderable sections. */
func (gc *GophermapContents) Load() *GophorError {
	/* Load the gophermap into memory as gophermap sections */
	var gophorErr *GophorError
	gc.Sections, gophorErr = readGophermap(gc.Request)
	if gophorErr != nil {
		return &GophorError{ InvalidGophermapErr, gophorErr }
	} else {
		return nil
	}
}

/* Clear drops the parsed sections so they can be garbage collected. */
func (gc *GophermapContents) Clear() {
	gc.Sections = nil
}
|
|
||||||
|
|
||||||
/* GophermapSection is one renderable section of a parsed gophermap;
 * differing section types store whatever they need to render on demand.
 */
type GophermapSection interface {
	Render(*Responder) *GophorError
}
|
|
||||||
|
|
||||||
type GophermapTextSection struct {
|
|
||||||
Contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *GophermapTextSection) Render(responder *Responder) *GophorError {
|
|
||||||
return responder.WriteData(replaceStrings(string(s.Contents), responder.Host))
|
|
||||||
}
|
|
||||||
|
|
||||||
type GophermapDirectorySection struct {
|
|
||||||
/* Holds onto a directory path, and a list of files
|
|
||||||
* to hide from the client when rendering.
|
|
||||||
*/
|
|
||||||
|
|
||||||
Request *Request
|
|
||||||
Hidden map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GophermapDirectorySection) Render(responder *Responder) *GophorError {
|
|
||||||
/* Create new responder from supplied and using stored path */
|
|
||||||
return listDir(responder.CloneWithRequest(g.Request), g.Hidden)
|
|
||||||
}
|
|
||||||
|
|
||||||
type GophermapFileSection struct {
|
|
||||||
/* Holds onto a file path to be read and rendered when requested */
|
|
||||||
Request *Request
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GophermapFileSection) Render(responder *Responder) *GophorError {
|
|
||||||
fileContents, gophorErr := readIntoGophermap(g.Request.Path.Absolute())
|
|
||||||
if gophorErr != nil {
|
|
||||||
return gophorErr
|
|
||||||
}
|
|
||||||
return responder.WriteData(fileContents)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* GophermapSubmapSection holds onto a sub-gophermap path to be read,
 * parsed and rendered when requested.
 */
type GophermapSubmapSection struct {
	Request *Request
}

/* Render parses the sub-gophermap and renders each of its sections.
 * NOTE(review): section render errors here are logged but not propagated --
 * this always returns nil after a successful parse, unlike
 * GophermapContents.Render which aborts on the first failure. Presumably
 * best-effort by design; confirm before changing.
 */
func (g *GophermapSubmapSection) Render(responder *Responder) *GophorError {
	/* Load the gophermap into memory as gophermap sections */
	sections, gophorErr := readGophermap(g.Request)
	if gophorErr != nil {
		return gophorErr
	}

	/* Render and send each of the gophermap sections */
	for _, line := range sections {
		gophorErr = line.Render(responder)
		if gophorErr != nil {
			Config.SysLog.Error("", "Error executing gophermap contents: %s\n", gophorErr.Error())
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
type GophermapExecCgiSection struct {
|
|
||||||
/* Holds onto a request with CGI script path and supplied parameters */
|
|
||||||
Request *Request
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GophermapExecCgiSection) Render(responder *Responder) *GophorError {
|
|
||||||
/* Create new filesystem request from mixture of stored + supplied */
|
|
||||||
return executeCgi(responder.CloneWithRequest(g.Request))
|
|
||||||
}
|
|
||||||
|
|
||||||
type GophermapExecFileSection struct {
|
|
||||||
/* Holds onto a request with executable file path and supplied arguments */
|
|
||||||
Request *Request
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GophermapExecFileSection) Render(responder *Responder) *GophorError {
|
|
||||||
/* Create new responder from supplied and using stored path */
|
|
||||||
return executeFile(responder.CloneWithRequest(g.Request))
|
|
||||||
}
|
|
||||||
|
|
||||||
/* readGophermap reads and parses a gophermap into separately cacheable and
 * renderable GophermapSection values. Line-type prefixes drive a switch:
 * info/title/comment/hidden-file lines are handled inline, sub-gophermap
 * lines are stat'ed and classified (exec / submap / cgi / file), and the
 * end / end-begin-list markers stop the scan.
 */
func readGophermap(request *Request) ([]GophermapSection, *GophorError) {
	/* Create return slice */
	sections := make([]GophermapSection, 0)

	/* Create hidden files map now in case dir listing requested */
	hidden := map[string]bool{
		request.Path.Relative(): true, /* Ignore current gophermap */
		CgiBinDirStr: true,            /* Ignore cgi-bin if found */
	}

	/* Keep track of whether we've already come across a title line (only 1 allowed!) */
	titleAlready := false

	/* Error set within the nested scan function below, checked after the scan */
	var returnErr *GophorError

	/* Perform buffered scan with our supplied splitter and iterators.
	 * The callback's bool return controls the scan: false stops it.
	 * NOTE(review): error branches set returnErr then `return true`, so
	 * scanning apparently continues after an error (later errors would
	 * overwrite earlier ones) -- confirm against bufferedScan semantics.
	 */
	gophorErr := bufferedScan(request.Path.Absolute(),
		func(scanner *bufio.Scanner) bool {
			line := scanner.Text()

			/* Parse the line item type and handle */
			lineType := parseLineType(line)
			switch lineType {
			case TypeInfoNotStated:
				/* Append TypeInfo to the beginning of line */
				sections = append(sections, &GophermapTextSection{ buildInfoLine(line) })

			case TypeTitle:
				/* Reformat title line to send as info line with appropriate selector */
				if !titleAlready {
					sections = append(sections, &GophermapTextSection{ buildLine(TypeInfo, line[1:], "TITLE", NullHost, NullPort) })
					titleAlready = true
				}

			case TypeComment:
				/* We ignore this line */
				break

			case TypeHiddenFile:
				/* Add to hidden files map */
				hidden[request.Path.JoinRel(line[1:])] = true

			case TypeSubGophermap:
				/* Parse new RequestPath and parameters */
				subRequest, gophorErr := parseLineRequestString(request.Path, line[1:])
				if gophorErr != nil {
					/* Failed parsing line request string, set returnErr and request finish */
					returnErr = gophorErr
					return true
				} else if subRequest.Path.Relative() == "" || subRequest.Path.Relative() == request.Path.Relative() {
					/* Empty path, or we've been supplied this same gophermap --
					 * recursion is bad! Set return error and request finish.
					 */
					returnErr = &GophorError{ InvalidRequestErr, nil }
					return true
				}

				/* Perform file stat */
				stat, err := os.Stat(subRequest.Path.Absolute())
				if (err != nil) || (stat.Mode() & os.ModeDir != 0) {
					/* File read error or is directory */
					returnErr = &GophorError{ FileStatErr, err }
					return true
				}

				/* Check if we've been supplied subgophermap or regular file */
				if isGophermap(subRequest.Path.Relative()) {
					/* If executable, store as GophermapExecFileSection, else GophermapSubmapSection */
					if stat.Mode().Perm() & 0100 != 0 {
						sections = append(sections, &GophermapExecFileSection { subRequest })
					} else {
						sections = append(sections, &GophermapSubmapSection{ subRequest })
					}
				} else {
					/* If stored in cgi-bin store as GophermapExecCgiSection, else GophermapFileSection */
					if withinCgiBin(subRequest.Path.Relative()) {
						sections = append(sections, &GophermapExecCgiSection{ subRequest })
					} else {
						sections = append(sections, &GophermapFileSection{ subRequest })
					}
				}

			case TypeEnd:
				/* Lastline, stop scanning. GophermapContents.Render() appends
				 * the footer / last-line so we don't have to here.
				 */
				return false

			case TypeEndBeginList:
				/* Append GophermapDirectorySection object then stop, as with TypeEnd. */
				dirRequest := &Request{ NewRequestPath(request.Path.RootDir(), request.Path.TrimRelSuffix(GophermapFileStr)), "" }
				sections = append(sections, &GophermapDirectorySection{ dirRequest, hidden })
				return false

			default:
				/* Default is appending to sections slice as GopherMapTextSection */
				sections = append(sections, &GophermapTextSection{ []byte(line+DOSLineEnd) })
			}

			return true
		},
	)

	/* Check the bufferedScan didn't exit with error */
	if gophorErr != nil {
		return nil, gophorErr
	} else if returnErr != nil {
		return nil, returnErr
	}

	return sections, nil
}
|
|
||||||
|
|
||||||
/* readIntoGophermap reads a plain text file and converts it into gophermap
 * info-line sections, reflowing long lines to the configured page width and
 * ensuring the result ends with a DOS line-ending.
 */
func readIntoGophermap(path string) ([]byte, *GophorError) {
	/* Create return slice */
	fileContents := make([]byte, 0)

	/* Perform buffered scan with our supplied iterator */
	gophorErr := bufferedScan(path,
		func(scanner *bufio.Scanner) bool {
			line := scanner.Text()

			/* Blank line -> empty info line */
			if line == "" {
				fileContents = append(fileContents, buildInfoLine("")...)
				return true
			}

			/* Replace the newline characters */
			line = replaceNewLines(line)

			/* Iterate through line string, reflowing to new line
			 * until all lines < PageWidth.
			 * (Splits at exactly PageWidth bytes, regardless of word
			 * boundaries.)
			 */
			for len(line) > 0 {
				length := minWidth(len(line))
				fileContents = append(fileContents, buildInfoLine(line[:length])...)
				line = line[length:]
			}

			return true
		},
	)

	/* Check the bufferedScan didn't exit with error */
	if gophorErr != nil {
		return nil, gophorErr
	}

	/* Check final output ends on a newline */
	if !bytes.HasSuffix(fileContents, []byte(DOSLineEnd)) {
		fileContents = append(fileContents, []byte(DOSLineEnd)...)
	}

	return fileContents, nil
}
|
|
||||||
|
|
||||||
/* Return minimum width out of PageWidth and W */
|
|
||||||
func minWidth(w int) int {
|
|
||||||
if w <= Config.PageWidth {
|
|
||||||
return w
|
|
||||||
} else {
|
|
||||||
return Config.PageWidth
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,351 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	/* Help converting file size stat (bytes) to the configured cache limit
	 * supplied in megabytes */
	BytesInMegaByte = 1048576.0

	/* Filename constants */
	CgiBinDirStr     = "cgi-bin"   /* directory whose contents run as CGI */
	GophermapFileStr = "gophermap" /* per-directory gophermap file name */
)
|
|
||||||
|
|
||||||
/* FileSystem holds and helps manage the file cache, as well as managing
 * access and responses to requests submitted by a worker instance.
 */
type FileSystem struct {
	CacheMap     *FixedMap        /* fixed-size cache of loaded files, keyed by absolute path */
	CacheMutex   sync.RWMutex     /* guards CacheMap access */
	CacheFileMax int64            /* max file size (bytes) eligible for caching */
	Remaps       []*FileRemap     /* request path rewrite rules */
	Restricted   []*regexp.Regexp /* patterns for paths clients may not access */
}
|
|
||||||
|
|
||||||
func (fs *FileSystem) Init(size int, fileSizeMax float64) {
|
|
||||||
fs.CacheMap = NewFixedMap(size)
|
|
||||||
fs.CacheMutex = sync.RWMutex{}
|
|
||||||
fs.CacheFileMax = int64(BytesInMegaByte * fileSizeMax)
|
|
||||||
/* .Remaps and .Restricted are handled within gopher.go */
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *FileSystem) IsRestricted(path string) bool {
|
|
||||||
for _, regex := range fs.Restricted {
|
|
||||||
if regex.MatchString(path) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
/* RemapRequestPath tries each configured remap rule against the request's
 * relative path, expanding the first matching rule's template with the
 * regex submatches. Returns the remapped path and true on success, or
 * (nil, false) when no rule produced a non-empty replacement.
 */
func (fs *FileSystem) RemapRequestPath(requestPath *RequestPath) (*RequestPath, bool) {
	for _, remap := range fs.Remaps {
		/* No match :( keep lookin */
		if !remap.Regex.MatchString(requestPath.Relative()) {
			continue
		}

		/* Create new path from template and submatches */
		newPath := make([]byte, 0)
		for _, submatches := range remap.Regex.FindAllStringSubmatchIndex(requestPath.Relative(), -1) {
			newPath = remap.Regex.ExpandString(newPath, remap.Template, requestPath.Relative(), submatches)
		}

		/* Ignore empty replacement path */
		if len(newPath) == 0 {
			continue
		}

		/* Set this new path to the _actual_ path */
		return requestPath.RemapPath(string(newPath)), true
	}

	return nil, false
}
|
|
||||||
|
|
||||||
/* HandleRequest is the main dispatch for a filesystem request: it rejects
 * restricted paths, applies remap rules, stats the target, then serves it
 * as a directory (gophermap or generated listing), regular file (CGI or
 * cached fetch), or rejects unsupported types. Paths that fail to stat get
 * one last chance as a generated (in-memory) file.
 */
func (fs *FileSystem) HandleRequest(responder *Responder) *GophorError {
	/* Check if restricted file */
	if fs.IsRestricted(responder.Request.Path.Relative()) {
		return &GophorError{ IllegalPathErr, nil }
	}

	/* Try remap according to supplied regex */
	remap, doneRemap := fs.RemapRequestPath(responder.Request.Path)

	var err error
	var stat os.FileInfo
	if doneRemap {
		/* Try get the remapped path */
		stat, err = os.Stat(remap.Absolute())
		if err == nil {
			/* Remapped path exists, set this! */
			responder.Request.Path = remap
		} else {
			/* Last ditch effort to grab generated file */
			return fs.FetchGeneratedFile(responder, err)
		}
	} else {
		/* Just get regular supplied request path */
		stat, err = os.Stat(responder.Request.Path.Absolute())
		if err != nil {
			/* Last ditch effort to grab generated file */
			return fs.FetchGeneratedFile(responder, err)
		}
	}

	switch {
	/* Directory */
	case stat.Mode() & os.ModeDir != 0:
		/* Ignore anything under cgi-bin directory */
		if withinCgiBin(responder.Request.Path.Relative()) {
			return &GophorError{ IllegalPathErr, nil }
		}

		/* Check Gophermap exists */
		gophermapPath := NewRequestPath(responder.Request.Path.RootDir(), responder.Request.Path.JoinRel(GophermapFileStr))
		stat, err = os.Stat(gophermapPath.Absolute())

		if err == nil {
			/* Gophermap exists! If executable try return executed contents,
			 * else serve as regular gophermap. */
			gophermapRequest := &Request{ gophermapPath, responder.Request.Parameters }
			responder.Request = gophermapRequest

			if stat.Mode().Perm() & 0100 != 0 {
				return executeFile(responder)
			} else {
				return fs.FetchFile(responder)
			}
		} else {
			/* No gophermap, serve directory listing (hiding the absent
			 * gophermap name and cgi-bin) */
			return listDirAsGophermap(responder, map[string]bool{ gophermapPath.Relative(): true, CgiBinDirStr: true })
		}

	/* Regular file */
	case stat.Mode() & os.ModeType == 0:
		/* If cgi-bin, try return executed contents. Else, fetch regular file */
		if responder.Request.Path.HasRelPrefix(CgiBinDirStr) {
			return executeCgi(responder)
		} else {
			return fs.FetchFile(responder)
		}

	/* Unsupported type (symlink, device, socket, ...) */
	default:
		return &GophorError{ FileTypeErr, nil }
	}
}
|
|
||||||
|
|
||||||
/* FetchGeneratedFile is the fallback for paths that failed to stat on disk:
 * it serves the cached in-memory (generated) file for that path if one
 * exists, otherwise wraps and returns the original stat error.
 */
func (fs *FileSystem) FetchGeneratedFile(responder *Responder, err error) *GophorError {
	fs.CacheMutex.RLock()
	file := fs.CacheMap.Get(responder.Request.Path.Absolute())
	if file == nil {
		/* Generated file at path not in cache map either, return */
		fs.CacheMutex.RUnlock()
		return &GophorError{ FileStatErr, err }
	}

	/* It's there! Write contents under the file's read lock */
	file.Mutex.RLock()
	gophorErr := file.WriteContents(responder)
	file.Mutex.RUnlock()

	fs.CacheMutex.RUnlock()
	return gophorErr
}
|
|
||||||
|
|
||||||
/* FetchFile serves a file, preferring the cache: a cached-but-stale file is
 * reloaded under its write lock; an uncached file is opened and either
 * streamed raw (too large to cache) or loaded, cached and served. The
 * cache-map read lock is held for the duration, upgraded to a write lock
 * only to insert a new entry.
 */
func (fs *FileSystem) FetchFile(responder *Responder) *GophorError {
	/* Get cache map read lock then check if file in cache map */
	fs.CacheMutex.RLock()
	file := fs.CacheMap.Get(responder.Request.Path.Absolute())

	if file != nil {
		/* File in cache -- before doing anything get file read lock */
		file.Mutex.RLock()

		/* Check file is marked as fresh */
		if !file.Fresh {
			/* File not fresh! Swap file read for write-lock.
			 * NOTE(review): between RUnlock and Lock another goroutine may
			 * reload the file first; CacheContents is presumably idempotent
			 * enough that a double reload is harmless -- confirm.
			 */
			file.Mutex.RUnlock()
			file.Mutex.Lock()

			/* Reload file contents from disk */
			gophorErr := file.CacheContents()
			if gophorErr != nil {
				/* Error loading contents, unlock all mutex then return error */
				file.Mutex.Unlock()
				fs.CacheMutex.RUnlock()
				return gophorErr
			}

			/* Updated! Swap back file write for read lock */
			file.Mutex.Unlock()
			file.Mutex.RLock()
		}
	} else {
		/* Open file here, to check it exists, ready for file stat
		 * and in case file is too big we pass it as a raw response.
		 * NOTE(review): fd is only consumed by WriteRaw in the too-large
		 * branch; on the caching path it is never closed here (Load()
		 * re-opens the path itself) -- looks like an fd leak, confirm.
		 */
		fd, err := os.Open(responder.Request.Path.Absolute())
		if err != nil {
			/* Error opening file, unlock read mutex then return error */
			fs.CacheMutex.RUnlock()
			return &GophorError{ FileOpenErr, err }
		}

		/* We need a doctor, stat! */
		stat, err := fd.Stat()
		if err != nil {
			/* Error stat'ing file, unlock read mutex then return */
			fs.CacheMutex.RUnlock()
			return &GophorError{ FileStatErr, err }
		}

		/* Compare file size (in MB) to CacheFileSizeMax. If larger, just send file raw */
		if stat.Size() > fs.CacheFileMax {
			/* Unlock the read mutex, we don't need it where we're going... returning, we're returning. */
			fs.CacheMutex.RUnlock()
			return responder.WriteRaw(fd)
		}

		/* Create new file contents */
		var contents FileContents
		if isGophermap(responder.Request.Path.Relative()) {
			contents = &GophermapContents{ responder.Request, nil }
		} else {
			contents = &RegularFileContents{ responder.Request.Path, nil }
		}

		/* Create new file wrapper around contents */
		file = &File{ contents, sync.RWMutex{}, true, time.Now().UnixNano() }

		/* File isn't in cache yet so no need to get file lock mutex */
		gophorErr := file.CacheContents()
		if gophorErr != nil {
			/* Error loading contents, unlock read mutex then return error */
			fs.CacheMutex.RUnlock()
			return gophorErr
		}

		/* File not in cache -- Swap cache map read for write lock. */
		fs.CacheMutex.RUnlock()
		fs.CacheMutex.Lock()

		/* Put file in the FixedMap */
		fs.CacheMap.Put(responder.Request.Path.Absolute(), file)

		/* Before unlocking cache mutex, lock file read for upcoming call to .Contents() */
		file.Mutex.RLock()

		/* Swap cache lock back to read */
		fs.CacheMutex.Unlock()
		fs.CacheMutex.RLock()
	}

	/* Write file contents via responder */
	gophorErr := file.WriteContents(responder)
	file.Mutex.RUnlock()

	/* Finally we can unlock the cache map read lock, we are done :) */
	fs.CacheMutex.RUnlock()

	return gophorErr
}
|
|
||||||
|
|
||||||
/* File wraps the cached contents of a file, helping with management. */
type File struct {
	Content     FileContents /* the cached (or generated) contents */
	Mutex       sync.RWMutex /* guards Content / Fresh / LastRefresh */
	Fresh       bool         /* false once the on-disk file is newer than the cache */
	LastRefresh int64        /* UnixNano timestamp of the last (re)load */
}
|
|
||||||
|
|
||||||
/* WriteContents renders the wrapped contents to the client via responder.
 * Callers are expected to hold f.Mutex (read) -- see FetchFile.
 */
func (f *File) WriteContents(responder *Responder) *GophorError {
	return f.Content.Render(responder)
}
|
|
||||||
|
|
||||||
func (f *File) CacheContents() *GophorError {
|
|
||||||
/* Clear current file contents */
|
|
||||||
f.Content.Clear()
|
|
||||||
|
|
||||||
/* Reload the file */
|
|
||||||
gophorErr := f.Content.Load()
|
|
||||||
if gophorErr != nil {
|
|
||||||
return gophorErr
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Update lastRefresh, set fresh, unset deletion (not likely set) */
|
|
||||||
f.LastRefresh = time.Now().UnixNano()
|
|
||||||
f.Fresh = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Start the file monitor! */
|
|
||||||
func startFileMonitor(sleepTime time.Duration) {
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
/* Sleep so we don't take up all the precious CPU time :) */
|
|
||||||
time.Sleep(sleepTime)
|
|
||||||
|
|
||||||
/* Check global file cache freshness */
|
|
||||||
checkCacheFreshness()
|
|
||||||
}
|
|
||||||
|
|
||||||
/* We shouldn't have reached here */
|
|
||||||
Config.SysLog.Fatal("", "FileCache monitor escaped run loop!\n")
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
/* checkCacheFreshness walks the file cache, removing entries whose on-disk
 * file can no longer be stat'ed and marking entries stale when the on-disk
 * file is newer than the cached copy. Generated (in-memory) files are
 * skipped entirely.
 */
func checkCacheFreshness() {
	/* Before anything, get cache WRITE lock (we may have to delete entries,
	 * and holding it also makes the per-file field access below safe) */
	Config.FileSystem.CacheMutex.Lock()

	/* Iterate through paths in cache map to query file last modified times */
	for path := range Config.FileSystem.CacheMap.Map {
		/* Get file pointer, no need for file lock as we hold the cache write lock */
		file := Config.FileSystem.CacheMap.Get(path)

		/* If this is a generated file, we skip */
		if isGeneratedType(file) {
			continue
		}

		/* Check file still exists on disk, delete and continue if not
		 * (deleting during range is safe for Go maps) */
		stat, err := os.Stat(path)
		if err != nil {
			Config.SysLog.Error("", "Failed to stat file in cache: %s\n", path)
			Config.FileSystem.CacheMap.Remove(path)
			continue
		}

		/* Get file's last modified time */
		timeModified := stat.ModTime().UnixNano()

		/* If the file is marked as fresh, but file on disk is newer, mark as unfresh */
		if file.Fresh && file.LastRefresh < timeModified {
			file.Fresh = false
		}
	}

	/* Done! Release the cache write lock */
	Config.FileSystem.CacheMutex.Unlock()
}
|
|
||||||
|
|
||||||
/* Just a helper function to neaten-up checking if file contents is of generated type */
|
|
||||||
func isGeneratedType(file *File) bool {
|
|
||||||
switch file.Content.(type) {
|
|
||||||
case *GeneratedFileContents:
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,202 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"sort"
|
|
||||||
"bufio"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
FileReadBufSize = 1024
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Perform simple buffered read on a file at path */
|
|
||||||
func bufferedRead(path string) ([]byte, *GophorError) {
|
|
||||||
/* Open file */
|
|
||||||
fd, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, &GophorError{ FileOpenErr, err }
|
|
||||||
}
|
|
||||||
defer fd.Close()
|
|
||||||
|
|
||||||
/* Setup buffers */
|
|
||||||
var count int
|
|
||||||
contents := make([]byte, 0)
|
|
||||||
buf := make([]byte, FileReadBufSize)
|
|
||||||
|
|
||||||
/* Setup reader */
|
|
||||||
reader := bufio.NewReader(fd)
|
|
||||||
|
|
||||||
/* Read through buffer until error or null bytes! */
|
|
||||||
for {
|
|
||||||
count, err = reader.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, &GophorError{ FileReadErr, err }
|
|
||||||
}
|
|
||||||
|
|
||||||
contents = append(contents, buf[:count]...)
|
|
||||||
|
|
||||||
if count < FileReadBufSize {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return contents, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Perform buffered read on file at path, then scan through with supplied iterator func */
|
|
||||||
/* bufferedScan reads the whole file at path into memory, then scans it
 * line-by-line, invoking scanIterator on each line until it returns false or
 * the input is exhausted. Returns FileReadErr on read/scan failure, nil on success.
 */
func bufferedScan(path string, scanIterator func(*bufio.Scanner) bool) *GophorError {
	/* First, read raw file contents */
	contents, gophorErr := bufferedRead(path)
	if gophorErr != nil {
		return gophorErr
	}

	/* Create reader and scanner from this */
	reader := bytes.NewReader(contents)
	scanner := bufio.NewScanner(reader)

	/* If contents contain DOS line-endings anywhere, split by DOS! Else, split by Unix */
	if bytes.Contains(contents, []byte(DOSLineEnd)) {
		scanner.Split(dosLineEndSplitter)
	} else {
		scanner.Split(unixLineEndSplitter)
	}

	/* Scan through file contents using supplied iterator (iterator returning false stops early) */
	for scanner.Scan() && scanIterator(scanner) {}

	/* Check scanner finished cleanly */
	if scanner.Err() != nil {
		return &GophorError{ FileReadErr, scanner.Err() }
	}

	return nil
}
|
|
||||||
|
|
||||||
/* Split on DOS line end */
|
|
||||||
/* dosLineEndSplitter is a bufio.SplitFunc that tokenizes input on DOS ("\r\n")
 * line endings, returning each line without its terminator. */
func dosLineEndSplitter(data []byte, atEOF bool) (advance int, token []byte, err error) {
	/* Nothing left to tokenize */
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}

	idx := bytes.Index(data, []byte("\r\n"))
	if idx < 0 {
		/* No terminator yet -- request more data */
		return 0, nil, nil
	}

	/* Complete CRLF-terminated line: consume it plus the 2-byte terminator */
	return idx + 2, data[:idx], nil
}
|
|
||||||
|
|
||||||
/* Split on unix line end */
|
|
||||||
/* unixLineEndSplitter is a bufio.SplitFunc that tokenizes input on Unix ("\n")
 * line endings, returning each line without its terminator.
 * Uses bytes.IndexByte, the idiomatic (and cheaper) single-byte search, instead
 * of bytes.Index with a one-byte slice.
 */
func unixLineEndSplitter(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		/* At EOF, no more data */
		return 0, nil, nil
	}

	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		/* We have a full new-line terminated line */
		return i + 1, data[:i], nil
	}

	/* Request more data */
	return 0, nil, nil
}
|
|
||||||
|
|
||||||
/* List the files in directory, hiding those requested, including title and footer */
|
|
||||||
/* listDirAsGophermap writes a gophermap-style directory listing to the responder:
 * a title info line, a ".." parent entry, the directory's entries (minus those
 * in hidden), then the configured footer text. Returns the first write error hit.
 */
func listDirAsGophermap(responder *Responder, hidden map[string]bool) *GophorError {
	/* Write title (info line with a "TITLE" selector) followed by a blank info line */
	gophorErr := responder.WriteData(append(buildLine(TypeInfo, "[ "+responder.Host.Name()+responder.Request.Path.Selector()+" ]", "TITLE", NullHost, NullPort), buildInfoLine("")...))
	if gophorErr != nil {
		return gophorErr
	}

	/* Write a 'back' entry. GoLang Readdir() seems to miss this */
	gophorErr = responder.WriteData(buildLine(TypeDirectory, "..", responder.Request.Path.JoinSelector(".."), responder.Host.Name(), responder.Host.Port()))
	if gophorErr != nil {
		return gophorErr
	}

	/* Write the actual directory entries */
	gophorErr = listDir(responder, hidden)
	if gophorErr != nil {
		return gophorErr
	}

	/* Finally write footer (carries the gophermap last-line) */
	return responder.WriteData(Config.FooterText)
}
|
|
||||||
|
|
||||||
/* List the files in a directory, hiding those requested */
|
|
||||||
func listDir(responder *Responder, hidden map[string]bool) *GophorError {
|
|
||||||
/* Open directory file descriptor */
|
|
||||||
fd, err := os.Open(responder.Request.Path.Absolute())
|
|
||||||
if err != nil {
|
|
||||||
Config.SysLog.Error("", "failed to open %s: %s\n", responder.Request.Path.Absolute(), err.Error())
|
|
||||||
return &GophorError{ FileOpenErr, err }
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Read files in directory */
|
|
||||||
files, err := fd.Readdir(-1)
|
|
||||||
if err != nil {
|
|
||||||
Config.SysLog.Error("", "failed to enumerate dir %s: %s\n", responder.Request.Path.Absolute(), err.Error())
|
|
||||||
return &GophorError{ DirListErr, err }
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Sort the files by name */
|
|
||||||
sort.Sort(byName(files))
|
|
||||||
|
|
||||||
/* Create directory content slice, ready */
|
|
||||||
dirContents := make([]byte, 0)
|
|
||||||
|
|
||||||
/* Walk through files :D */
|
|
||||||
var reqPath *RequestPath
|
|
||||||
for _, file := range files {
|
|
||||||
reqPath = NewRequestPath(responder.Request.Path.RootDir(), responder.Request.Path.JoinRel(file.Name()))
|
|
||||||
|
|
||||||
/* If hidden file, or restricted file, continue! */
|
|
||||||
if isHiddenFile(hidden, reqPath.Relative()) /*|| isRestrictedFile(reqPath.Relative())*/ {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Handle file, directory or ignore others */
|
|
||||||
switch {
|
|
||||||
case file.Mode() & os.ModeDir != 0:
|
|
||||||
/* Directory -- create directory listing */
|
|
||||||
dirContents = append(dirContents, buildLine(TypeDirectory, file.Name(), reqPath.Selector(), responder.Host.Name(), responder.Host.Port())...)
|
|
||||||
|
|
||||||
case file.Mode() & os.ModeType == 0:
|
|
||||||
/* Regular file -- find item type and creating listing */
|
|
||||||
itemPath := reqPath.Selector()
|
|
||||||
itemType := getItemType(itemPath)
|
|
||||||
dirContents = append(dirContents, buildLine(itemType, file.Name(), reqPath.Selector(), responder.Host.Name(), responder.Host.Port())...)
|
|
||||||
|
|
||||||
default:
|
|
||||||
/* Ignore */
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Finally write dirContents and return result */
|
|
||||||
return responder.WriteData(dirContents)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Helper function to simple checking in map */
|
|
||||||
/* isHiddenFile reports whether fileName has an entry in the supplied
 * hidden-files map (presence check, independent of the stored bool value). */
func isHiddenFile(hiddenMap map[string]bool, fileName string) bool {
	_, hidden := hiddenMap[fileName]
	return hidden
}
|
|
||||||
|
|
||||||
/* Took a leaf out of go-gopher's book here. */

/* byName implements sort.Interface over a slice of os.FileInfo,
 * ordering entries lexicographically by file name (ascending). */
type byName []os.FileInfo

/* Len returns the number of entries. */
func (s byName) Len() int { return len(s) }

/* Less orders entries by Name() using plain string comparison. */
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }

/* Swap exchanges two entries in place. */
func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
@ -1,78 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/list"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* TODO: work on efficiency. use our own lower level data structure? */
|
|
||||||
|
|
||||||
/* FixedMap:
|
|
||||||
* A fixed size map that pushes the last
|
|
||||||
* used value from the stack if size limit
|
|
||||||
* is reached.
|
|
||||||
*/
|
|
||||||
type FixedMap struct {
	Map map[string]*MapElement /* key -> wrapped (list element, file) pair */
	List *list.List /* recency list: front = most recently used, back = eviction candidate */
	Size int /* maximum number of entries before eviction kicks in */
}
|
|
||||||
|
|
||||||
/* MapElement:
|
|
||||||
* Simple structure to wrap pointer to list
|
|
||||||
* element and stored map value together.
|
|
||||||
*/
|
|
||||||
type MapElement struct {
	Element *list.Element /* position in the recency list (its Value is the map key string) */
	Value *File /* the cached file itself */
}
|
|
||||||
|
|
||||||
func NewFixedMap(size int) *FixedMap {
|
|
||||||
return &FixedMap{
|
|
||||||
make(map[string]*MapElement),
|
|
||||||
list.New(),
|
|
||||||
size,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Get file in map for key, or nil */
|
|
||||||
func (fm *FixedMap) Get(key string) *File {
|
|
||||||
elem, ok := fm.Map[key]
|
|
||||||
if ok {
|
|
||||||
/* And that's an LRU implementation folks! */
|
|
||||||
fm.List.MoveToFront(elem.Element)
|
|
||||||
return elem.Value
|
|
||||||
} else {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Put file in map as key, pushing out last file if size limit reached */
|
|
||||||
func (fm *FixedMap) Put(key string, value *File) {
|
|
||||||
element := fm.List.PushFront(key)
|
|
||||||
fm.Map[key] = &MapElement{ element, value }
|
|
||||||
|
|
||||||
if fm.List.Len() > fm.Size {
|
|
||||||
/* We're at capacity! SIR! */
|
|
||||||
element = fm.List.Back()
|
|
||||||
|
|
||||||
/* We don't check here as we know this is ALWAYS a string */
|
|
||||||
key, _ := element.Value.(string)
|
|
||||||
|
|
||||||
/* Finally delete the map entry and list element! */
|
|
||||||
delete(fm.Map, key)
|
|
||||||
fm.List.Remove(element)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Try delete element, else do nothing */
|
|
||||||
func (fm *FixedMap) Remove(key string) {
|
|
||||||
elem, ok := fm.Map[key]
|
|
||||||
if !ok {
|
|
||||||
/* We don't have this key, return */
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Remove the selected element */
|
|
||||||
delete(fm.Map, key)
|
|
||||||
fm.List.Remove(elem.Element)
|
|
||||||
}
|
|
@ -1,50 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Formats an info-text footer from string. Add last line as we use the footer to contain last line (regardless if empty) */
|
|
||||||
/* formatGophermapFooter renders footer text as gopher info lines, optionally
 * preceded by a separator line of PageWidth underscores, and always terminated
 * with the gopher last-line -- the footer carries the last-line even when text
 * is empty. */
func formatGophermapFooter(text string, useSeparator bool) []byte {
	ret := make([]byte, 0)
	if text != "" {
		/* Blank info line to space the footer from the preceding content */
		ret = append(ret, buildInfoLine("")...)
		if useSeparator {
			ret = append(ret, buildInfoLine(buildLineSeparator(Config.PageWidth))...)
		}
		/* Each footer text line becomes its own info line */
		for _, line := range strings.Split(text, "\n") {
			ret = append(ret, buildInfoLine(line)...)
		}
	}
	/* Always append the gopher last-line terminator */
	return append(ret, []byte(LastLine)...)
}
|
|
||||||
|
|
||||||
/* Replace standard replacement strings */
|
|
||||||
func replaceStrings(str string, connHost *ConnHost) []byte {
|
|
||||||
/* We only replace the actual host and port values */
|
|
||||||
split := strings.Split(str, Tab)
|
|
||||||
if len(split) < 4 {
|
|
||||||
return []byte(str)
|
|
||||||
}
|
|
||||||
|
|
||||||
split[2] = strings.Replace(split[2], ReplaceStrHostname, connHost.Name(), -1)
|
|
||||||
split[3] = strings.Replace(split[3], ReplaceStrPort, connHost.Port(), -1)
|
|
||||||
|
|
||||||
/* Return slice */
|
|
||||||
b := make([]byte, 0)
|
|
||||||
|
|
||||||
/* Recombine the slices and add the removed tabs */
|
|
||||||
splitLen := len(split)
|
|
||||||
for i := 0; i < splitLen-1; i += 1 {
|
|
||||||
split[i] += Tab
|
|
||||||
b = append(b, []byte(split[i])...)
|
|
||||||
}
|
|
||||||
b = append(b, []byte(split[splitLen-1])...)
|
|
||||||
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Replace new-line characters */
|
|
||||||
/* replaceNewLines strips every new-line character from str. */
func replaceNewLines(str string) string {
	return strings.ReplaceAll(str, "\n", "")
}
|
|
@ -1,237 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type GopherUrl struct {
|
|
||||||
Path string
|
|
||||||
Parameters string
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
/* Just naming some constants */
|
|
||||||
DOSLineEnd = "\r\n"
|
|
||||||
UnixLineEnd = "\n"
|
|
||||||
End = "."
|
|
||||||
Tab = "\t"
|
|
||||||
LastLine = End+DOSLineEnd
|
|
||||||
|
|
||||||
/* Gopher line formatting */
|
|
||||||
MaxUserNameLen = 70 /* RFC 1436 standard, though we use user-supplied page-width */
|
|
||||||
MaxSelectorLen = 255 /* RFC 1436 standard */
|
|
||||||
SelectorErrorStr = "/max_selector_length_reached"
|
|
||||||
GophermapRenderErrorStr = ""
|
|
||||||
GophermapReadErrorStr = "Error reading subgophermap: "
|
|
||||||
GophermapExecErrorStr = "Error executing gophermap: "
|
|
||||||
|
|
||||||
/* Default null values */
|
|
||||||
NullSelector = "-"
|
|
||||||
NullHost = "null.host"
|
|
||||||
NullPort = "0"
|
|
||||||
|
|
||||||
/* Replacement strings */
|
|
||||||
ReplaceStrHostname = "$hostname"
|
|
||||||
ReplaceStrPort = "$port"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Item type characters:
|
|
||||||
* Collected from RFC 1436 standard, Wikipedia, Go-gopher project
|
|
||||||
* and Gophernicus project. Those with ALL-CAPS descriptions in
|
|
||||||
* [square brackets] defined and used by Gophernicus, a popular
|
|
||||||
* Gopher server.
|
|
||||||
*/
|
|
||||||
type ItemType byte
|
|
||||||
const (
|
|
||||||
/* RFC 1436 Standard */
|
|
||||||
TypeFile = ItemType('0') /* Regular file (text) */
|
|
||||||
TypeDirectory = ItemType('1') /* Directory (menu) */
|
|
||||||
TypeDatabase = ItemType('2') /* CCSO flat db; other db */
|
|
||||||
TypeError = ItemType('3') /* Error message */
|
|
||||||
TypeMacBinHex = ItemType('4') /* Macintosh BinHex file */
|
|
||||||
TypeBinArchive = ItemType('5') /* Binary archive (zip, rar, 7zip, tar, gzip, etc), CLIENT MUST READ UNTIL TCP CLOSE */
|
|
||||||
TypeUUEncoded = ItemType('6') /* UUEncoded archive */
|
|
||||||
TypeSearch = ItemType('7') /* Query search engine or CGI script */
|
|
||||||
TypeTelnet = ItemType('8') /* Telnet to: VT100 series server */
|
|
||||||
TypeBin = ItemType('9') /* Binary file (see also, 5), CLIENT MUST READ UNTIL TCP CLOSE */
|
|
||||||
TypeTn3270 = ItemType('T') /* Telnet to: tn3270 series server */
|
|
||||||
TypeGif = ItemType('g') /* GIF format image file (just use I) */
|
|
||||||
TypeImage = ItemType('I') /* Any format image file */
|
|
||||||
TypeRedundant = ItemType('+') /* Redundant (indicates mirror of previous item) */
|
|
||||||
|
|
||||||
/* GopherII Standard */
|
|
||||||
TypeCalendar = ItemType('c') /* Calendar file */
|
|
||||||
TypeDoc = ItemType('d') /* Word-processing document; PDF document */
|
|
||||||
TypeHtml = ItemType('h') /* HTML document */
|
|
||||||
TypeInfo = ItemType('i') /* Informational text (not selectable) */
|
|
||||||
TypeMarkup = ItemType('p') /* Page layout or markup document (plain text w/ ASCII tags) */
|
|
||||||
TypeMail = ItemType('M') /* Email repository (MBOX) */
|
|
||||||
TypeAudio = ItemType('s') /* Audio recordings */
|
|
||||||
TypeXml = ItemType('x') /* eXtensible Markup Language document */
|
|
||||||
TypeVideo = ItemType(';') /* Video files */
|
|
||||||
|
|
||||||
/* Commonly Used */
|
|
||||||
TypeTitle = ItemType('!') /* [SERVER ONLY] Menu title (set title ONCE per gophermap) */
|
|
||||||
TypeComment = ItemType('#') /* [SERVER ONLY] Comment, rest of line is ignored */
|
|
||||||
TypeHiddenFile = ItemType('-') /* [SERVER ONLY] Hide file/directory from directory listing */
|
|
||||||
TypeEnd = ItemType('.') /* [SERVER ONLY] Last line -- stop processing gophermap default */
|
|
||||||
TypeSubGophermap = ItemType('=') /* [SERVER ONLY] Include subgophermap / regular file here. */
|
|
||||||
TypeEndBeginList = ItemType('*') /* [SERVER ONLY] Last line + directory listing -- stop processing gophermap and end on directory listing */
|
|
||||||
|
|
||||||
/* Default type */
|
|
||||||
TypeDefault = TypeBin
|
|
||||||
|
|
||||||
/* Gophor specific types */
|
|
||||||
TypeInfoNotStated = ItemType('I') /* [INTERNAL USE] */
|
|
||||||
TypeUnknown = ItemType('?') /* [INTERNAL USE] */
|
|
||||||
)
|
|
||||||
|
|
||||||
var FileExtMap = map[string]ItemType{
|
|
||||||
".out": TypeBin,
|
|
||||||
".a": TypeBin,
|
|
||||||
".o": TypeBin,
|
|
||||||
".ko": TypeBin, /* ... Though tbh, kernel extensions?!!! */
|
|
||||||
".msi": TypeBin,
|
|
||||||
".exe": TypeBin,
|
|
||||||
|
|
||||||
".gophermap": TypeDirectory,
|
|
||||||
|
|
||||||
".lz": TypeBinArchive,
|
|
||||||
".gz": TypeBinArchive,
|
|
||||||
".bz2": TypeBinArchive,
|
|
||||||
".7z": TypeBinArchive,
|
|
||||||
".zip": TypeBinArchive,
|
|
||||||
|
|
||||||
".gitignore": TypeFile,
|
|
||||||
".txt": TypeFile,
|
|
||||||
".json": TypeFile,
|
|
||||||
".yaml": TypeFile,
|
|
||||||
".ocaml": TypeFile,
|
|
||||||
".s": TypeFile,
|
|
||||||
".c": TypeFile,
|
|
||||||
".py": TypeFile,
|
|
||||||
".h": TypeFile,
|
|
||||||
".go": TypeFile,
|
|
||||||
".fs": TypeFile,
|
|
||||||
".odin": TypeFile,
|
|
||||||
".nanorc": TypeFile,
|
|
||||||
".bashrc": TypeFile,
|
|
||||||
".mkshrc": TypeFile,
|
|
||||||
".vimrc": TypeFile,
|
|
||||||
".vim": TypeFile,
|
|
||||||
".viminfo": TypeFile,
|
|
||||||
".sh": TypeFile,
|
|
||||||
".conf": TypeFile,
|
|
||||||
".xinitrc": TypeFile,
|
|
||||||
".jstarrc": TypeFile,
|
|
||||||
".joerc": TypeFile,
|
|
||||||
".jpicorc": TypeFile,
|
|
||||||
".profile": TypeFile,
|
|
||||||
".bash_profile": TypeFile,
|
|
||||||
".bash_logout": TypeFile,
|
|
||||||
".log": TypeFile,
|
|
||||||
".ovpn": TypeFile,
|
|
||||||
|
|
||||||
".md": TypeMarkup,
|
|
||||||
|
|
||||||
".xml": TypeXml,
|
|
||||||
|
|
||||||
".doc": TypeDoc,
|
|
||||||
".docx": TypeDoc,
|
|
||||||
".pdf": TypeDoc,
|
|
||||||
|
|
||||||
".jpg": TypeImage,
|
|
||||||
".jpeg": TypeImage,
|
|
||||||
".png": TypeImage,
|
|
||||||
".gif": TypeImage,
|
|
||||||
|
|
||||||
".html": TypeHtml,
|
|
||||||
".htm": TypeHtml,
|
|
||||||
|
|
||||||
".ogg": TypeAudio,
|
|
||||||
".mp3": TypeAudio,
|
|
||||||
".wav": TypeAudio,
|
|
||||||
".mod": TypeAudio,
|
|
||||||
".it": TypeAudio,
|
|
||||||
".xm": TypeAudio,
|
|
||||||
".mid": TypeAudio,
|
|
||||||
".vgm": TypeAudio,
|
|
||||||
".opus": TypeAudio,
|
|
||||||
".m4a": TypeAudio,
|
|
||||||
".aac": TypeAudio,
|
|
||||||
|
|
||||||
".mp4": TypeVideo,
|
|
||||||
".mkv": TypeVideo,
|
|
||||||
".webm": TypeVideo,
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Build error line */
|
|
||||||
func buildErrorLine(selector string) []byte {
|
|
||||||
ret := string(TypeError)
|
|
||||||
ret += selector + DOSLineEnd
|
|
||||||
ret += LastLine
|
|
||||||
return []byte(ret)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Build gopher compliant line with supplied information */
|
|
||||||
/* buildLine builds a gopher protocol menu line: item type char, display name
 * (truncated with "..." to the configured page width), selector (replaced by an
 * error selector when over the RFC 1436 255-char limit, omitted when empty),
 * then host, port and the DOS line-ending. */
func buildLine(t ItemType, name, selector, host string, port string) []byte {
	ret := string(t)

	/* Add name, truncate name if too long (reserving room for the "..." marker) */
	if len(name) > Config.PageWidth {
		ret += name[:Config.PageWidth-5]+"..."+Tab
	} else {
		ret += name+Tab
	}

	/* Add selector. If too long use error selector, skip entirely if empty */
	selectorLen := len(selector)
	if selectorLen > MaxSelectorLen {
		ret += SelectorErrorStr+Tab
	} else if selectorLen > 0 {
		ret += selector+Tab
	}

	/* Add host + port and the CRLF terminator */
	ret += host+Tab+port+DOSLineEnd

	return []byte(ret)
}
|
|
||||||
|
|
||||||
/* Build gopher compliant info line */
|
|
||||||
/* buildInfoLine builds a gopher info line (type 'i') using the null
 * selector/host/port placeholder values. */
func buildInfoLine(content string) []byte {
	return buildLine(TypeInfo, content, NullSelector, NullHost, NullPort)
}
|
|
||||||
|
|
||||||
/* Get item type for named file on disk */
|
|
||||||
func getItemType(name string) ItemType {
|
|
||||||
/* Split, name MUST be lower */
|
|
||||||
split := strings.Split(strings.ToLower(name), ".")
|
|
||||||
|
|
||||||
/* First we look at how many '.' in name string */
|
|
||||||
splitLen := len(split)
|
|
||||||
switch splitLen {
|
|
||||||
case 0:
|
|
||||||
/* Always return TypeDefault. We can never tell */
|
|
||||||
return TypeDefault
|
|
||||||
|
|
||||||
default:
|
|
||||||
/* Get index of str after last ".", look in FileExtMap */
|
|
||||||
fileType, ok := FileExtMap["."+split[splitLen-1]]
|
|
||||||
if ok {
|
|
||||||
return fileType
|
|
||||||
} else {
|
|
||||||
return TypeDefault
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Build a line separator of supplied width */
|
|
||||||
/* buildLineSeparator returns a horizontal separator string of count underscores.
 * strings.Repeat replaces the previous quadratic "+=" concatenation loop. */
func buildLineSeparator(count int) string {
	return strings.Repeat("_", count)
}
|
|
@ -0,0 +1,92 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import "gophor/core"
|
||||||
|
|
||||||
|
// Gopher specific error codes
|
||||||
|
const (
	// InvalidGophermapErr indicates a gophermap failed to parse
	// (e.g. a duplicate title line or an invalid subgophermap reference).
	InvalidGophermapErr core.ErrorCode = 1

	// SubgophermapIsDirErr indicates an included subgophermap path
	// resolved to a directory rather than a file.
	SubgophermapIsDirErr core.ErrorCode = 2

	// SubgophermapSizeErr indicates an included subgophermap exceeded
	// the configured maximum size.
	SubgophermapSizeErr core.ErrorCode = 3
)
|
||||||
|
|
||||||
|
// generateErrorMessage returns a message for any gopher specific error codes
|
||||||
|
// generateErrorMessage returns the human-readable message for gopher-specific
// error codes; any other code falls back to the generic unknown-error string.
func generateErrorMessage(code core.ErrorCode) string {
	switch code {
	case InvalidGophermapErr:
		return invalidGophermapErrStr
	case SubgophermapIsDirErr:
		return subgophermapIsDirErrStr
	case SubgophermapSizeErr:
		return subgophermapSizeErrStr
	default:
		return unknownErrStr
	}
}
|
||||||
|
|
||||||
|
// generateErrorResponse takes an error code and generates an error response byte slice
|
||||||
|
func generateErrorResponse(code core.ErrorCode) ([]byte, bool) {
|
||||||
|
switch code {
|
||||||
|
case core.ConnWriteErr:
|
||||||
|
return nil, false // no point responding if we couldn't write
|
||||||
|
case core.ConnReadErr:
|
||||||
|
return buildErrorLine(errorResponse503), true
|
||||||
|
case core.ConnCloseErr:
|
||||||
|
return nil, false // no point responding if we couldn't close
|
||||||
|
case core.ListenerResolveErr:
|
||||||
|
return nil, false // not user facing
|
||||||
|
case core.ListenerBeginErr:
|
||||||
|
return nil, false // not user facing
|
||||||
|
case core.ListenerAcceptErr:
|
||||||
|
return nil, false // not user facing
|
||||||
|
case core.InvalidIPErr:
|
||||||
|
return nil, false // not user facing
|
||||||
|
case core.InvalidPortErr:
|
||||||
|
return nil, false // not user facing
|
||||||
|
case core.FileOpenErr:
|
||||||
|
return buildErrorLine(errorResponse404), true
|
||||||
|
case core.FileStatErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.FileReadErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.FileTypeErr:
|
||||||
|
return buildErrorLine(errorResponse404), true
|
||||||
|
case core.DirectoryReadErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.RestrictedPathErr:
|
||||||
|
return buildErrorLine(errorResponse403), true
|
||||||
|
case core.InvalidRequestErr:
|
||||||
|
return buildErrorLine(errorResponse400), true
|
||||||
|
case core.CGIStartErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.CGIExitCodeErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.CGIStatus400Err:
|
||||||
|
return buildErrorLine(errorResponse400), true
|
||||||
|
case core.CGIStatus401Err:
|
||||||
|
return buildErrorLine(errorResponse401), true
|
||||||
|
case core.CGIStatus403Err:
|
||||||
|
return buildErrorLine(errorResponse403), true
|
||||||
|
case core.CGIStatus404Err:
|
||||||
|
return buildErrorLine(errorResponse404), true
|
||||||
|
case core.CGIStatus408Err:
|
||||||
|
return buildErrorLine(errorResponse408), true
|
||||||
|
case core.CGIStatus410Err:
|
||||||
|
return buildErrorLine(errorResponse410), true
|
||||||
|
case core.CGIStatus500Err:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case core.CGIStatus501Err:
|
||||||
|
return buildErrorLine(errorResponse501), true
|
||||||
|
case core.CGIStatus503Err:
|
||||||
|
return buildErrorLine(errorResponse503), true
|
||||||
|
case core.CGIStatusUnknownErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case InvalidGophermapErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case SubgophermapIsDirErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
case SubgophermapSizeErr:
|
||||||
|
return buildErrorLine(errorResponse500), true
|
||||||
|
default:
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,36 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/core"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gophermapContents is an implementation of core.FileContents that holds individually renderable sections of a gophermap
|
||||||
|
type gophermapContents struct {
|
||||||
|
sections []gophermapSection
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteToClient renders each cached section of the gophermap, and writes them to the client
|
||||||
|
func (gc *gophermapContents) WriteToClient(client *core.Client, path *core.Path) core.Error {
|
||||||
|
for _, section := range gc.sections {
|
||||||
|
err := section.RenderAndWrite(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, write the footer (including last-line)
|
||||||
|
return client.Conn().WriteBytes(footer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load takes an open FD and loads the gophermap contents into memory as different renderable sections
|
||||||
|
// Load takes an open FD and loads the gophermap contents into memory as
// individually renderable sections (via readGophermap); path supplies the
// gophermap's own location for resolving relative includes.
func (gc *gophermapContents) Load(fd *os.File, path *core.Path) core.Error {
	var err core.Error
	gc.sections, err = readGophermap(fd, path)
	return err
}
|
||||||
|
|
||||||
|
// Clear empties currently cached GophermapContents memory
|
||||||
|
// Clear drops the cached section slice so the memory can be reclaimed.
func (gc *gophermapContents) Clear() {
	gc.sections = nil
}
|
@ -0,0 +1,96 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/core"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Gophermap line formatting constants
|
||||||
|
const (
|
||||||
|
maxSelectorLen = 255
|
||||||
|
nullHost = "null.host"
|
||||||
|
nullPort = "0"
|
||||||
|
errorSelector = "/error_selector_length"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// pageWidth is the maximum set page width of a gophermap document to render to
|
||||||
|
pageWidth int
|
||||||
|
|
||||||
|
// footer holds the formatted footer text (if supplied), and gophermap last-line
|
||||||
|
footer []byte
|
||||||
|
)
|
||||||
|
|
||||||
|
// formatName formats a gopher line name string
|
||||||
|
func formatName(name string) string {
|
||||||
|
if len(name) > pageWidth {
|
||||||
|
return name[:pageWidth-4] + "...\t"
|
||||||
|
}
|
||||||
|
return name + "\t"
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatSelector formats a gopher line selector string
|
||||||
|
func formatSelector(selector string) string {
|
||||||
|
if len(selector) > maxSelectorLen {
|
||||||
|
return errorSelector + "\t"
|
||||||
|
}
|
||||||
|
return selector + "\t"
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatHostPort formats a gopher line host + port
|
||||||
|
// formatHostPort formats a gopher line's host and port fields
// (tab-separated, no trailing tab -- the line terminator follows directly).
func formatHostPort(host, port string) string {
	return host + "\t" + port
}
|
||||||
|
|
||||||
|
// buildLine builds a gopher line string
|
||||||
|
// buildLine builds a complete gopher menu line: item type, formatted name,
// formatted selector, host+port, terminated with CRLF.
func buildLine(t ItemType, name, selector, host, port string) []byte {
	return []byte(string(t) + formatName(name) + formatSelector(selector) + formatHostPort(host, port) + "\r\n")
}
|
||||||
|
|
||||||
|
// buildInfoLine builds a gopher info line string
|
||||||
|
// buildInfoLine builds a gopher info line (typeInfo) pointing at the
// null host/port placeholders.
func buildInfoLine(line string) []byte {
	return []byte(string(typeInfo) + formatName(line) + formatHostPort(nullHost, nullPort) + "\r\n")
}
|
||||||
|
|
||||||
|
// buildErrorLine builds a gopher error line string
|
||||||
|
// buildErrorLine builds an error-type response line carrying the supplied
// selector, followed by the gopher last-line terminator.
func buildErrorLine(selector string) []byte {
	return []byte(string(typeError) + selector + "\r\n" + ".\r\n")
}
|
||||||
|
|
||||||
|
// appendFileListing formats and appends a new file entry as part of a directory listing
|
||||||
|
// appendFileListing formats and appends a gopher menu line for file to b as
// part of a directory listing: directories get a directory-type line, regular
// files get a line typed by extension, and all other file modes are skipped.
func appendFileListing(b []byte, file os.FileInfo, p *core.Path) []byte {
	switch {
	case file.Mode()&os.ModeDir != 0:
		return append(b, buildLine(typeDirectory, file.Name(), p.Selector(), core.Hostname, core.FwdPort)...)
	case file.Mode()&os.ModeType == 0:
		t := getItemType(p.Relative())
		return append(b, buildLine(t, file.Name(), p.Selector(), core.Hostname, core.FwdPort)...)
	default:
		// Symlinks, sockets, pipes etc: not listed
		return b
	}
}
|
||||||
|
|
||||||
|
// buildFooter formats a raw gopher footer ready to attach to end of gophermaps (including DOS line-end)
|
||||||
|
// buildFooter formats raw footer text into gopher info lines -- preceded by a
// page-width separator line when text is supplied -- and always terminates
// with the gophermap last-line (".\r\n"), ready to append to rendered maps.
func buildFooter(raw string) []byte {
	ret := make([]byte, 0)

	if raw != "" {
		// Separator between page content and footer text
		ret = append(ret, buildInfoLine(footerLineSeparator())...)

		// Each footer text line becomes its own info line
		for _, line := range strings.Split(raw, "\n") {
			ret = append(ret, buildInfoLine(line)...)
		}
	}

	// Always end with the gopher last-line
	return append(ret, []byte(".\r\n")...)
}
|
||||||
|
|
||||||
|
// footerLineSeparator is an internal function that generates a footer line separator string
|
||||||
|
func footerLineSeparator() string {
|
||||||
|
ret := ""
|
||||||
|
for i := 0; i < pageWidth; i++ {
|
||||||
|
ret += "_"
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
@ -0,0 +1,240 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/core"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// subgophermapSizeMax specifies the maximum size of an included subgophermap
|
||||||
|
subgophermapSizeMax int64
|
||||||
|
)
|
||||||
|
|
||||||
|
// GophermapSection is an interface that specifies individually renderable (and writeable) sections of a gophermap
|
||||||
|
type gophermapSection interface {
|
||||||
|
RenderAndWrite(*core.Client) core.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// readGophermap reads a FD and Path as gophermap sections
|
||||||
|
func readGophermap(fd *os.File, p *core.Path) ([]gophermapSection, core.Error) {
|
||||||
|
// Create return slice
|
||||||
|
sections := make([]gophermapSection, 0)
|
||||||
|
|
||||||
|
// Create hidden files map now in case later requested
|
||||||
|
hidden := map[string]bool{
|
||||||
|
p.Relative(): true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error setting within nested function below
|
||||||
|
var returnErr core.Error
|
||||||
|
|
||||||
|
// Perform scan of gophermap FD
|
||||||
|
titleAlready := false
|
||||||
|
scanErr := core.FileSystem.ScanFile(
|
||||||
|
fd,
|
||||||
|
func(line string) bool {
|
||||||
|
// Parse the line item type and handle
|
||||||
|
lineType := parseLineType(line)
|
||||||
|
switch lineType {
|
||||||
|
case typeInfoNotStated:
|
||||||
|
// Append TypeInfo to beginning of line
|
||||||
|
sections = append(sections, &TextSection{buildInfoLine(line)})
|
||||||
|
return true
|
||||||
|
|
||||||
|
case typeTitle:
|
||||||
|
// Reformat title line to send as info line with appropriate selector
|
||||||
|
if !titleAlready {
|
||||||
|
sections = append(sections, &TextSection{buildLine(typeInfo, line[1:], "TITLE", nullHost, nullPort)})
|
||||||
|
titleAlready = true
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
returnErr = core.NewError(InvalidGophermapErr)
|
||||||
|
return false
|
||||||
|
|
||||||
|
case typeComment:
|
||||||
|
// ignore this line
|
||||||
|
return true
|
||||||
|
|
||||||
|
case typeHiddenFile:
|
||||||
|
// Add to hidden files map
|
||||||
|
hidden[p.JoinRelative(line[1:])] = true
|
||||||
|
return true
|
||||||
|
|
||||||
|
case typeSubGophermap:
|
||||||
|
// Parse new Path and parameters
|
||||||
|
request := core.ParseInternalRequest(p, line[1:])
|
||||||
|
if returnErr != nil {
|
||||||
|
return false
|
||||||
|
} else if request.Path().Relative() == "" || request.Path().Relative() == p.Relative() {
|
||||||
|
returnErr = core.NewError(InvalidGophermapErr)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open FD
|
||||||
|
var subFD *os.File
|
||||||
|
subFD, returnErr = core.FileSystem.OpenFile(request.Path())
|
||||||
|
if returnErr != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get stat
|
||||||
|
stat, err := subFD.Stat()
|
||||||
|
if err != nil {
|
||||||
|
returnErr = core.WrapError(core.FileStatErr, err)
|
||||||
|
return false
|
||||||
|
} else if stat.IsDir() {
|
||||||
|
returnErr = core.NewError(SubgophermapIsDirErr)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle CGI script
|
||||||
|
if core.WithinCGIDir(request.Path()) {
|
||||||
|
sections = append(sections, &CGISection{request})
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error out if file too big
|
||||||
|
if stat.Size() > subgophermapSizeMax {
|
||||||
|
returnErr = core.NewError(SubgophermapSizeErr)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle regular file
|
||||||
|
if !isGophermap(request.Path()) {
|
||||||
|
sections = append(sections, &FileSection{})
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle gophermap
|
||||||
|
sections = append(sections, &SubgophermapSection{})
|
||||||
|
return true
|
||||||
|
|
||||||
|
case typeEnd:
|
||||||
|
// Last line, break-out!
|
||||||
|
return false
|
||||||
|
|
||||||
|
case typeEndBeginList:
|
||||||
|
// Append DirectorySection object then break, as-with typeEnd
|
||||||
|
dirPath := p.Dir()
|
||||||
|
sections = append(sections, &DirectorySection{hidden, dirPath})
|
||||||
|
return false
|
||||||
|
|
||||||
|
default:
|
||||||
|
// Default is appending to sections slice as TextSection
|
||||||
|
sections = append(sections, &TextSection{[]byte(line + "\r\n")})
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
// Check the scan didn't exit with error
|
||||||
|
if returnErr != nil {
|
||||||
|
return nil, returnErr
|
||||||
|
} else if scanErr != nil {
|
||||||
|
return nil, scanErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return sections, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextSection is a simple implementation that holds line's byte contents as-is
type TextSection struct {
	contents []byte // pre-rendered line bytes (already include any trailing "\r\n")
}

// RenderAndWrite simply writes the byte slice to the client
func (s *TextSection) RenderAndWrite(client *core.Client) core.Error {
	return client.Conn().WriteBytes(s.contents)
}
|
||||||
|
|
||||||
|
// DirectorySection is an implementation that holds a dir path, and map of hidden files, to later list a dir contents
type DirectorySection struct {
	hidden map[string]bool // set of relative paths to omit from the listing
	path   *core.Path      // directory whose contents will be listed
}

// RenderAndWrite scans and renders a list of the contents of a directory (skipping hidden or restricted files)
func (s *DirectorySection) RenderAndWrite(client *core.Client) core.Error {
	fd, err := core.FileSystem.OpenFile(s.path)
	if err != nil {
		return err
	}
	// NOTE(review): fd is not visibly closed in this function — confirm
	// whether ScanDirectory / the FileSystem layer takes ownership of it.

	// Slice to write
	dirContents := make([]byte, 0)

	// Scan directory and build lines
	err = core.FileSystem.ScanDirectory(fd, s.path, func(file os.FileInfo, p *core.Path) {
		// Append new formatted file listing (if correct type)
		dirContents = appendFileListing(dirContents, file, p)
	})
	if err != nil {
		return err
	}

	// Write dirContents to client
	return client.Conn().WriteBytes(dirContents)
}
|
||||||
|
|
||||||
|
// FileSection is an implementation that holds a file path, and writes the file contents to client
type FileSection struct {
	path *core.Path // file to be read and sent verbatim
}

// RenderAndWrite simply opens, reads and writes the file contents to the client
func (s *FileSection) RenderAndWrite(client *core.Client) core.Error {
	// Open FD for the file
	fd, err := core.FileSystem.OpenFile(s.path)
	if err != nil {
		return err
	}
	// NOTE(review): fd is not visibly closed in this function — confirm
	// whether ReadFile / the FileSystem layer takes ownership of it.

	// Read the file contents into memory
	b, err := core.FileSystem.ReadFile(fd)
	if err != nil {
		return err
	}

	// Write the file contents to the client
	return client.Conn().WriteBytes(b)
}
|
||||||
|
|
||||||
|
// SubgophermapSection is an implementation to hold onto a gophermap path, then read, render and write contents to a client
type SubgophermapSection struct {
	path *core.Path // gophermap file to embed at this point
}

// RenderAndWrite reads, renders and writes the contents of the gophermap to the client
func (s *SubgophermapSection) RenderAndWrite(client *core.Client) core.Error {
	// Get FD for gophermap
	fd, err := core.FileSystem.OpenFile(s.path)
	if err != nil {
		return err
	}
	// NOTE(review): fd is not visibly closed in this function — confirm
	// ownership with readGophermap / the FileSystem layer.

	// Read gophermap into sections
	sections, err := readGophermap(fd, s.path)
	if err != nil {
		return err
	}

	// Write each of the sections (AAAA COULD BE RECURSIONNNNN)
	// NOTE(review): readGophermap only rejects DIRECT self-inclusion, so a
	// cycle of gophermaps including each other would recurse here without
	// a depth limit — confirm whether an upstream guard exists.
	for _, section := range sections {
		err := section.RenderAndWrite(client)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// CGISection is an implementation that holds onto a built request, then processing as a CGI request on request
type CGISection struct {
	request *core.Request // pre-parsed request (path + parameters) for the CGI script
}

// RenderAndWrite takes the request, and executes the associated CGI script with parameters
func (s *CGISection) RenderAndWrite(client *core.Client) core.Error {
	return core.ExecuteCGIScript(client, s.request)
}
|
@ -0,0 +1,22 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
// generateHTMLRedirect takes a URL string and generates the bytes of an HTML
// page that meta-refreshes the browser to that URL (with a manual fallback link).
// NOTE(review): the url is interpolated unescaped — confirm upstream
// sanitization before serving untrusted selectors.
func generateHTMLRedirect(url string) []byte {
	head := "<html>\n" +
		"<head>\n" +
		"<meta http-equiv=\"refresh\" content=\"1;URL=" + url + "\">" +
		"</head>\n"

	body := "<body>\n" +
		"You are following an external link to a web site.\n" +
		"You will be automatically taken to the site shortly.\n" +
		"If you do not get sent there, please click <A HREF=\"" + url + "\">here</A> to go to the web site.\n" +
		"<p>\n" +
		"The URL linked is <A HREF=\"" + url + "\">" + url + "</A>\n" +
		"<p>\n" +
		"Thanks for using Gophor!\n" +
		"</body>\n" +
		"</html>\n"

	return []byte(head + body)
}
|
@ -0,0 +1,192 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
// ItemType specifies a gopher item type char
type ItemType byte

// RFC 1436 Standard item types
const (
	typeFile       = ItemType('0') /* Regular file (text) */
	typeDirectory  = ItemType('1') /* Directory (menu) */
	typeDatabase   = ItemType('2') /* CCSO flat db; other db */
	typeError      = ItemType('3') /* Error message */
	typeMacBinHex  = ItemType('4') /* Macintosh BinHex file */
	typeBinArchive = ItemType('5') /* Binary archive (zip, rar, 7zip, tar, gzip, etc), CLIENT MUST READ UNTIL TCP CLOSE */
	typeUUEncoded  = ItemType('6') /* UUEncoded archive */
	typeSearch     = ItemType('7') /* Query search engine or CGI script */
	typeTelnet     = ItemType('8') /* Telnet to: VT100 series server */
	typeBin        = ItemType('9') /* Binary file (see also, 5), CLIENT MUST READ UNTIL TCP CLOSE */
	typeTn3270     = ItemType('T') /* Telnet to: tn3270 series server */
	typeGif        = ItemType('g') /* GIF format image file (just use I) */
	typeImage      = ItemType('I') /* Any format image file */
	typeRedundant  = ItemType('+') /* Redundant (indicates mirror of previous item) */
)

// GopherII Standard item types
const (
	typeCalendar = ItemType('c') /* Calendar file */
	typeDoc      = ItemType('d') /* Word-processing document; PDF document */
	typeHTML     = ItemType('h') /* HTML document */
	typeInfo     = ItemType('i') /* Informational text (not selectable) */
	typeMarkup   = ItemType('p') /* Page layout or markup document (plain text w/ ASCII tags) */
	typeMail     = ItemType('M') /* Email repository (MBOX) */
	typeAudio    = ItemType('s') /* Audio recordings */
	typeXML      = ItemType('x') /* eXtensible Markup Language document */
	typeVideo    = ItemType(';') /* Video files */
)

// Commonly Used item types (server-side gophermap directives, never sent to clients)
const (
	typeTitle        = ItemType('!') /* [SERVER ONLY] Menu title (set title ONCE per gophermap) */
	typeComment      = ItemType('#') /* [SERVER ONLY] Comment, rest of line is ignored */
	typeHiddenFile   = ItemType('-') /* [SERVER ONLY] Hide file/directory from directory listing */
	typeEnd          = ItemType('.') /* [SERVER ONLY] Last line -- stop processing gophermap default */
	typeSubGophermap = ItemType('=') /* [SERVER ONLY] Include subgophermap / regular file here. */
	typeEndBeginList = ItemType('*') /* [SERVER ONLY] Last line + directory listing -- stop processing gophermap and end on directory listing */
)

// Internal item types used only within this package's parsing logic
const (
	typeDefault = typeBin

	// Sentinel for "no type stated on this line" — reuses 'I', which
	// parseLineType can never legitimately return for a typed line here,
	// presumably to keep the sentinel within the ItemType space.
	typeInfoNotStated = ItemType('I')

	typeUnknown = ItemType('?') /* Unrecognized single-char line */
)
|
||||||
|
|
||||||
|
// fileExtMap specifies mapping of file extensions (lower-case, dot-prefixed)
// to gopher item types; extensions not listed fall back to typeDefault.
var fileExtMap = map[string]ItemType{
	".out": typeBin,
	".a":   typeBin,
	".o":   typeBin,
	".ko":  typeBin, /* Kernel extensions... WHY ARE YOU GIVING ACCESS TO DIRECTORIES WITH THIS */

	// Gophermaps are served as menus (directories)
	".gophermap": typeDirectory,

	".lz":  typeBinArchive,
	".gz":  typeBinArchive,
	".bz2": typeBinArchive,
	".7z":  typeBinArchive,
	".zip": typeBinArchive,

	".gitignore":    typeFile,
	".txt":          typeFile,
	".json":         typeFile,
	".yaml":         typeFile,
	".ocaml":        typeFile,
	".s":            typeFile,
	".c":            typeFile,
	".py":           typeFile,
	".h":            typeFile,
	".go":           typeFile,
	".fs":           typeFile,
	".odin":         typeFile,
	".nanorc":       typeFile,
	".bashrc":       typeFile,
	".mkshrc":       typeFile,
	".vimrc":        typeFile,
	".vim":          typeFile,
	".viminfo":      typeFile,
	".sh":           typeFile,
	".conf":         typeFile,
	".xinitrc":      typeFile,
	".jstarrc":      typeFile,
	".joerc":        typeFile,
	".jpicorc":      typeFile,
	".profile":      typeFile,
	".bash_profile": typeFile,
	".bash_logout":  typeFile,
	".log":          typeFile,
	".ovpn":         typeFile,

	".md": typeMarkup,

	".xml": typeXML,

	".doc":  typeDoc,
	".docx": typeDoc,
	".pdf":  typeDoc,

	".jpg":  typeImage,
	".jpeg": typeImage,
	".png":  typeImage,
	".gif":  typeImage,

	".html": typeHTML,
	".htm":  typeHTML,

	".ogg":  typeAudio,
	".mp3":  typeAudio,
	".wav":  typeAudio,
	".mod":  typeAudio,
	".it":   typeAudio,
	".xm":   typeAudio,
	".mid":  typeAudio,
	".vgm":  typeAudio,
	".opus": typeAudio,
	".m4a":  typeAudio,
	".aac":  typeAudio,

	".mp4":  typeVideo,
	".mkv":  typeVideo,
	".webm": typeVideo,
	".avi":  typeVideo,
}
|
||||||
|
|
||||||
|
// getItemType is an internal function to get an ItemType for a file name string
|
||||||
|
func getItemType(name string) ItemType {
|
||||||
|
// Split, name MUST be lower
|
||||||
|
split := strings.Split(strings.ToLower(name), ".")
|
||||||
|
|
||||||
|
// First we look at how many '.' in name string
|
||||||
|
splitLen := len(split)
|
||||||
|
switch splitLen {
|
||||||
|
case 0:
|
||||||
|
// Always return typeDefault, we can't tell
|
||||||
|
return typeDefault
|
||||||
|
|
||||||
|
default:
|
||||||
|
// get index of str after last '.', look up in fileExtMap
|
||||||
|
fileType, ok := fileExtMap["."+split[splitLen-1]]
|
||||||
|
if ok {
|
||||||
|
return fileType
|
||||||
|
}
|
||||||
|
return typeDefault
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseLineType parses a gophermap's line type based on first char and contents
|
||||||
|
func parseLineType(line string) ItemType {
|
||||||
|
lineLen := len(line)
|
||||||
|
|
||||||
|
if lineLen == 0 {
|
||||||
|
return typeInfoNotStated
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get ItemType for first char
|
||||||
|
t := ItemType(line[0])
|
||||||
|
|
||||||
|
if lineLen == 1 {
|
||||||
|
// The only accepted types for length 1 line below:
|
||||||
|
t := ItemType(line[0])
|
||||||
|
if t == typeEnd ||
|
||||||
|
t == typeEndBeginList ||
|
||||||
|
t == typeComment ||
|
||||||
|
t == typeInfo ||
|
||||||
|
t == typeTitle {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
return typeUnknown
|
||||||
|
} else if !strings.Contains(line, "\t") {
|
||||||
|
// The only accepted types for length >= 1 and with a tab
|
||||||
|
if t == typeComment ||
|
||||||
|
t == typeTitle ||
|
||||||
|
t == typeInfo ||
|
||||||
|
t == typeHiddenFile ||
|
||||||
|
t == typeSubGophermap {
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
return typeInfoNotStated
|
||||||
|
}
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
@ -0,0 +1,37 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"gophor/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setup parses gopher specific flags, and all core flags, preparing server for .Run()
func setup() {
	// Register gopher-specific flags before core parses the command line
	pWidth := flag.Uint(pageWidthFlagStr, 80, pageWidthDescStr)
	footerText := flag.String(footerTextFlagStr, "Gophor, a gopher server in Go!", footerTextDescStr)
	subgopherSizeMax := flag.Float64(subgopherSizeMaxFlagStr, 1.0, subgopherSizeMaxDescStr)
	admin := flag.String(adminFlagStr, "", adminDescStr)
	desc := flag.String(descFlagStr, "", descDescStr)
	geo := flag.String(geoFlagStr, "", geoDescStr)
	core.ParseFlagsAndSetup(generateErrorMessage)

	// Setup gopher specific global variables
	subgophermapSizeMax = int64(1048576.0 * *subgopherSizeMax) // convert megabytes (float flag) to bytes
	pageWidth = int(*pWidth)
	footer = buildFooter(*footerText)
	gophermapRegex = compileGophermapRegex()

	// Generate capability files
	capsTxt := generateCapsTxt(*desc, *admin, *geo)
	robotsTxt := generateRobotsTxt()

	// Add generated files to cache so they are served without disk backing
	core.FileSystem.AddGeneratedFile(core.NewPath(core.Root, "caps.txt"), capsTxt)
	core.FileSystem.AddGeneratedFile(core.NewPath(core.Root, "robots.txt"), robotsTxt)
}
|
||||||
|
|
||||||
|
// Run does as says :) — performs all gopher + core setup, then starts the
// core server loop with this package's serve function.
func Run() {
	setup()
	core.Start(serve)
}
|
@ -0,0 +1,48 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import "gophor/core"
|
||||||
|
|
||||||
|
// generatePolicyHeader returns the shared comment header placed at the top of
// generated server policy files (caps.txt, robots.txt), with CRLF line endings.
func generatePolicyHeader(name string) string {
	return "# This is an automatically generated" + "\r\n" +
		"# server policy file: " + name + "\r\n" +
		"#" + "\r\n" +
		"# BlackLivesMatter" + "\r\n"
}
|
||||||
|
|
||||||
|
// generateCapsTxt generates the caps.txt capability-file bytes advertising
// server metadata (description, admin contact, geolocation) to clients.
func generateCapsTxt(desc, admin, geo string) []byte {
	text := "CAPS" + "\r\n"
	text += "\r\n"
	text += generatePolicyHeader("caps.txt")
	text += "\r\n"
	text += "CapsVersion=1" + "\r\n"
	text += "ExpireCapsAfter=1800" + "\r\n"
	text += "\r\n"
	text += "PathDelimeter=/" + "\r\n"
	text += "PathIdentity=." + "\r\n"
	text += "PathParent=.." + "\r\n"
	text += "PathParentDouble=FALSE" + "\r\n"
	text += "PathEscapeCharacter=\\" + "\r\n"
	text += "PathKeepPreDelimeter=FALSE" + "\r\n"
	text += "\r\n"
	text += "ServerSoftware=Gophor" + "\r\n"
	text += "ServerSoftwareVersion=" + core.Version + "\r\n"
	text += "ServerDescription=" + desc + "\r\n"
	text += "ServerGeolocationString=" + geo + "\r\n"
	text += "ServerDefaultEncoding=utf-8" + "\r\n"
	text += "\r\n"
	text += "ServerAdmin=" + admin + "\r\n"
	return []byte(text)
}
|
||||||
|
|
||||||
|
func generateRobotsTxt() []byte {
|
||||||
|
text := generatePolicyHeader("robots.txt")
|
||||||
|
text += "\r\n"
|
||||||
|
text += "Usage-agent: *" + "\r\n"
|
||||||
|
text += "Disallow: *" + "\r\n"
|
||||||
|
text += "\r\n"
|
||||||
|
text += "Crawl-delay: 99999" + "\r\n"
|
||||||
|
text += "\r\n"
|
||||||
|
text += "# This server does not support scraping" + "\r\n"
|
||||||
|
return []byte(text)
|
||||||
|
}
|
@ -0,0 +1,21 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/core"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// gophermapRegex is the precompiled gophermap file name regex check,
	// assigned once during setup() via compileGophermapRegex
	gophermapRegex *regexp.Regexp
)

// compileGophermapRegex compiles the gophermap file name check regex:
// matches exactly "gophermap", any "*/gophermap", or any "*.gophermap".
func compileGophermapRegex() *regexp.Regexp {
	return regexp.MustCompile(`^(|.+/|.+\.)gophermap$`)
}

// isGophermap checks against gophermap regex as to whether a file path
// (relative form) is a gophermap.
func isGophermap(path *core.Path) bool {
	return gophermapRegex.MatchString(path.Relative())
}
|
@ -0,0 +1,110 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/core"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// serve is the global gopher server's serve function: reads one request line
// from the client, handles "URL:" web redirects, then hands the parsed request
// to the filesystem layer with a directory-listing fallback handler.
func serve(client *core.Client) {
	// Receive line from client
	received, err := client.Conn().ReadLine()
	if err != nil {
		client.LogError(clientReadFailStr)
		handleError(client, err)
		return
	}

	// Convert to string
	line := string(received)

	// If prefixed by 'URL:' send an HTML redirect page instead
	// (length comparison detects whether the prefix was stripped)
	lenBefore := len(line)
	line = strings.TrimPrefix(line, "URL:")
	if len(line) < lenBefore {
		client.Conn().WriteBytes(generateHTMLRedirect(line))
		client.LogInfo(clientRedirectFmtStr, line)
		return
	}

	// Parse new request
	request, err := core.ParseURLEncodedRequest(line)
	if err != nil {
		client.LogError(clientRequestParseFailStr)
		handleError(client, err)
		return
	}

	// Handle the request! The closure below is the directory handler:
	// it prefers an in-directory "gophermap" file, else lists contents.
	err = core.FileSystem.HandleClient(
		client,
		request,
		newFileContents,
		func(fs *core.FileSystemObject, client *core.Client, fd *os.File, p *core.Path) core.Error {
			// First check for gophermap, create gophermap Path object
			gophermap := p.JoinPath("gophermap")

			// If gophermap exists, we fetch this instead of listing the dir
			fd2, err := fs.OpenFile(gophermap)
			if err == nil {
				stat, osErr := fd2.Stat()
				if osErr == nil {
					// NOTE(review): fd2 is handed to FetchFile without an
					// explicit close here — presumably FetchFile takes
					// ownership; confirm in core.
					return fs.FetchFile(client, fd2, stat, gophermap, newFileContents)
				}

				// Else, just close fd2
				fd2.Close()
			}

			// Slice to write
			dirContents := make([]byte, 0)

			// Add directory heading + empty line
			dirContents = append(dirContents, buildLine(typeInfo, "[ "+core.Hostname+p.Selector()+" ]", "TITLE", nullHost, nullPort)...)
			dirContents = append(dirContents, buildInfoLine("")...)

			// Scan directory and build lines
			err = fs.ScanDirectory(
				fd,
				p,
				func(file os.FileInfo, fp *core.Path) {
					// Append new formatted file listing (if correct type)
					dirContents = appendFileListing(dirContents, file, fp)
				},
			)
			if err != nil {
				return err
			}

			// Add footer, write contents
			dirContents = append(dirContents, footer...)
			return client.Conn().WriteBytes(dirContents)
		},
	)

	// Final error handling
	if err != nil {
		handleError(client, err)
		client.LogError(clientServeFailStr, request.Path().Absolute())
	} else {
		client.LogInfo(clientServedStr, request.Path().Absolute())
	}
}
|
||||||
|
|
||||||
|
// handleError determines whether to send an error response to the client
// (only codes with a mapped response string are sent), and logs to system.
func handleError(client *core.Client, err core.Error) {
	response, ok := generateErrorResponse(err.Code())
	if ok {
		client.Conn().WriteBytes(response)
	}
	core.SystemLog.Error(err.Error())
}
|
||||||
|
|
||||||
|
// newFileContents returns a new FileContents object for the given path:
// gophermap paths get the gophermap renderer, everything else is served raw.
func newFileContents(p *core.Path) core.FileContents {
	if isGophermap(p) {
		return &gophermapContents{}
	}
	return &core.RegularFileContents{}
}
|
@ -0,0 +1,49 @@
|
|||||||
|
package gopher
|
||||||
|
|
||||||
|
// Client error response strings (HTTP-style status text reused for gopher errors)
const (
	errorResponse400 = "400 Bad Request"
	errorResponse401 = "401 Unauthorised"
	errorResponse403 = "403 Forbidden"
	errorResponse404 = "404 Not Found"
	errorResponse408 = "408 Request Time-out"
	errorResponse410 = "410 Gone"
	errorResponse500 = "500 Internal Server Error"
	errorResponse501 = "501 Not Implemented"
	errorResponse503 = "503 Service Unavailable"
)

// Gopher flag string constants (flag names and their help descriptions)
const (
	pageWidthFlagStr = "page-width"
	pageWidthDescStr = "Gopher page width"

	footerTextFlagStr = "footer-text"
	footerTextDescStr = "Footer text (empty to disable)"

	subgopherSizeMaxFlagStr = "subgopher-size-max"
	subgopherSizeMaxDescStr = "Subgophermap size max (megabytes)"

	adminFlagStr = "admin"
	adminDescStr = "Generated policy file admin email"

	descFlagStr = "description"
	descDescStr = "Generated policy file server description"

	geoFlagStr = "geolocation"
	geoDescStr = "Generated policy file server geolocation"
)

// Log string constants (some are format strings taking one %s argument)
const (
	clientReadFailStr         = "Failed to read"
	clientRedirectFmtStr      = "Redirecting to: %s"
	clientRequestParseFailStr = "Failed to parse request"
	clientServeFailStr        = "Failed to serve: %s"
	clientServedStr           = "Served: %s"

	invalidGophermapErrStr  = "Invalid gophermap"
	subgophermapIsDirErrStr = "Subgophermap path is dir"
	subgophermapSizeErrStr  = "Subgophermap size too large"
	unknownErrStr           = "Unknown error code"
)
|
@ -1,254 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"log"
|
|
||||||
"strconv"
|
|
||||||
"syscall"
|
|
||||||
"os/signal"
|
|
||||||
"flag"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// GophorVersion is the released server version string
	GophorVersion = "1.0-beta"
)

var (
	// Config is the single global server configuration, populated by setupServer()
	Config *ServerConfig
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
/* Quickly setup global logger */
|
|
||||||
setupGlobalLogger()
|
|
||||||
|
|
||||||
/* Setup the entire server, getting slice of listeners in return */
|
|
||||||
listeners := setupServer()
|
|
||||||
|
|
||||||
/* Handle signals so we can _actually_ shutdowm */
|
|
||||||
signals := make(chan os.Signal)
|
|
||||||
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL)
|
|
||||||
|
|
||||||
/* Start accepting connections on any supplied listeners */
|
|
||||||
for _, l := range listeners {
|
|
||||||
go func() {
|
|
||||||
Config.SysLog.Info("", "Listening on: gopher://%s:%s\n", l.Host.Name(), l.Host.RealPort())
|
|
||||||
|
|
||||||
for {
|
|
||||||
worker, err := l.Accept()
|
|
||||||
if err != nil {
|
|
||||||
Config.SysLog.Error("", "Error accepting connection: %s\n", err.Error())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
go worker.Serve()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
/* When OS signal received, we close-up */
|
|
||||||
sig := <-signals
|
|
||||||
Config.SysLog.Info("", "Signal received: %v. Shutting down...\n", sig)
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupServer() []*GophorListener {
|
|
||||||
/* First we setup all the flags and parse them... */
|
|
||||||
|
|
||||||
/* Base server settings */
|
|
||||||
serverRoot := flag.String("root", "/var/gopher", "Change server root directory.")
|
|
||||||
serverBindAddr := flag.String("bind-addr", "", "Change server socket bind address")
|
|
||||||
serverPort := flag.Int("port", 70, "Change server bind port.")
|
|
||||||
|
|
||||||
serverFwdPort := flag.Int("fwd-port", 0, "Change port used in '$port' replacement strings (useful if you're port forwarding).")
|
|
||||||
serverHostname := flag.String("hostname", "", "Change server hostname (FQDN).")
|
|
||||||
|
|
||||||
/* Logging settings */
|
|
||||||
systemLogPath := flag.String("system-log", "", "Change server system log file (blank outputs to stderr).")
|
|
||||||
accessLogPath := flag.String("access-log", "", "Change server access log file (blank outputs to stderr).")
|
|
||||||
logOutput := flag.String("log-output", "stderr", "Change server log file handling (disable|stderr|file)")
|
|
||||||
logOpts := flag.String("log-opts", "timestamp,ip", "Comma-separated list of log options (timestamp|ip)")
|
|
||||||
|
|
||||||
/* File system */
|
|
||||||
fileMonitorFreq := flag.Duration("file-monitor-freq", time.Second*60, "Change file monitor frequency.")
|
|
||||||
|
|
||||||
/* Cache settings */
|
|
||||||
cacheSize := flag.Int("cache-size", 50, "Change file cache size, measured in file count.")
|
|
||||||
cacheFileSizeMax := flag.Float64("cache-file-max", 0.5, "Change maximum file size to be cached (in megabytes).")
|
|
||||||
cacheDisabled := flag.Bool("disable-cache", false, "Disable file caching.")
|
|
||||||
|
|
||||||
/* Content settings */
|
|
||||||
pageWidth := flag.Int("page-width", 80, "Change page width used when formatting output.")
|
|
||||||
// charSet := flag.String("charset", "", "Change default output charset.")
|
|
||||||
charSet := "utf-8"
|
|
||||||
|
|
||||||
footerText := flag.String("footer", " Gophor, a Gopher server in Go.", "Change gophermap footer text (Unix new-line separated lines).")
|
|
||||||
footerSeparator := flag.Bool("no-footer-separator", false, "Disable footer line separator.")
|
|
||||||
|
|
||||||
/* Regex */
|
|
||||||
restrictedFiles := flag.String("restrict-files", "", "New-line separated list of regex statements restricting accessible files.")
|
|
||||||
fileRemaps := flag.String("file-remap", "", "New-line separated list of file remappings of format: /virtual/relative/path -> /actual/relative/path")
|
|
||||||
|
|
||||||
/* User supplied caps.txt information */
|
|
||||||
serverDescription := flag.String("description", "Gophor, a Gopher server in Go.", "Change server description in generated caps.txt.")
|
|
||||||
serverAdmin := flag.String("admin-email", "", "Change admin email in generated caps.txt.")
|
|
||||||
serverGeoloc := flag.String("geoloc", "", "Change server gelocation string in generated caps.txt.")
|
|
||||||
|
|
||||||
/* Exec settings */
|
|
||||||
disableCgi := flag.Bool("disable-cgi", false, "Disable CGI and all executable support.")
|
|
||||||
httpCompatCgi := flag.Bool("http-compat-cgi", false, "Enable HTTP CGI script compatibility (will strip HTTP headers).")
|
|
||||||
httpHeaderBuf := flag.Int("http-header-buf", 4096, "Change max CGI read count to look for and strip HTTP headers before sending raw (bytes).")
|
|
||||||
safeExecPath := flag.String("safe-path", "/usr/bin:/bin", "Set safe PATH variable to be used when executing CGI scripts, gophermaps and inline shell commands.")
|
|
||||||
maxExecRunTime := flag.Duration("max-exec-time", time.Second*3, "Change max executable CGI, gophermap and inline shell command runtime.")
|
|
||||||
|
|
||||||
/* Buffer sizes */
|
|
||||||
socketWriteBuf := flag.Int("socket-write-buf", 4096, "Change socket write buffer size (bytes).")
|
|
||||||
socketReadBuf := flag.Int("socket-read-buf", 256, "Change socket read buffer size (bytes).")
|
|
||||||
socketReadMax := flag.Int("socket-read-max", 8, "Change socket read count max (integer multiplier socket-read-buf-max)")
|
|
||||||
fileReadBuf := flag.Int("file-read-buf", 4096, "Change file read buffer size (bytes).")
|
|
||||||
|
|
||||||
/* Socket deadliens */
|
|
||||||
socketReadTimeout := flag.Duration("socket-read-timeout", time.Second*5, "Change socket read deadline (timeout).")
|
|
||||||
socketWriteTimeout := flag.Duration("socket-write-timeout", time.Second*30, "Change socket write deadline (timeout).")
|
|
||||||
|
|
||||||
/* Version string */
|
|
||||||
version := flag.Bool("version", false, "Print version information.")
|
|
||||||
|
|
||||||
/* Parse parse parse!! */
|
|
||||||
flag.Parse()
|
|
||||||
if *version {
|
|
||||||
printVersionExit()
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If hostname is nil we set it to bind-addr */
|
|
||||||
if *serverHostname == "" {
|
|
||||||
/* If both are blank that ain't too helpful */
|
|
||||||
if *serverBindAddr == "" {
|
|
||||||
log.Fatalf("Cannot have both -bind-addr and -hostname as empty!\n")
|
|
||||||
} else {
|
|
||||||
*serverHostname = *serverBindAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Setup the server configuration instance and enter as much as we can right now */
|
|
||||||
Config = new(ServerConfig)
|
|
||||||
|
|
||||||
/* Set misc content settings */
|
|
||||||
Config.PageWidth = *pageWidth
|
|
||||||
|
|
||||||
/* Setup various buffer sizes */
|
|
||||||
Config.SocketWriteBufSize = *socketWriteBuf
|
|
||||||
Config.SocketReadBufSize = *socketReadBuf
|
|
||||||
Config.SocketReadMax = *socketReadBuf * *socketReadMax
|
|
||||||
Config.FileReadBufSize = *fileReadBuf
|
|
||||||
|
|
||||||
/* Setup socket deadlines */
|
|
||||||
Config.SocketReadDeadline = *socketReadTimeout
|
|
||||||
Config.SocketWriteDeadline = *socketWriteTimeout
|
|
||||||
|
|
||||||
/* Have to be set AFTER page width variable set */
|
|
||||||
Config.FooterText = formatGophermapFooter(*footerText, !*footerSeparator)
|
|
||||||
|
|
||||||
/* Setup Gophor logging system */
|
|
||||||
Config.SysLog, Config.AccLog = setupLoggers(*logOutput, *logOpts, *systemLogPath, *accessLogPath)
|
|
||||||
|
|
||||||
/* Set CGI support status */
|
|
||||||
if *disableCgi {
|
|
||||||
Config.SysLog.Info("", "CGI support disabled\n")
|
|
||||||
Config.CgiEnabled = false
|
|
||||||
} else {
|
|
||||||
/* Enable CGI */
|
|
||||||
Config.SysLog.Info("", "CGI support enabled\n")
|
|
||||||
Config.CgiEnabled = true
|
|
||||||
|
|
||||||
if *httpCompatCgi {
|
|
||||||
Config.SysLog.Info("", "Enabling HTTP CGI script compatibility\n")
|
|
||||||
executeCgi = executeCgiStripHttp
|
|
||||||
|
|
||||||
/* Specific to CGI buffer */
|
|
||||||
Config.SysLog.Info("", "Max CGI HTTP header read-ahead: %d bytes\n", *httpHeaderBuf)
|
|
||||||
Config.SkipPrefixBufSize = *httpHeaderBuf
|
|
||||||
} else {
|
|
||||||
executeCgi = executeCgiNoHttp
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set safe executable path and setup environments */
|
|
||||||
Config.SysLog.Info("", "Setting safe executable path: %s\n", *safeExecPath)
|
|
||||||
Config.Env = setupExecEnviron(*safeExecPath)
|
|
||||||
Config.CgiEnv = setupInitialCgiEnviron(*safeExecPath, charSet)
|
|
||||||
|
|
||||||
/* Set executable watchdog */
|
|
||||||
Config.SysLog.Info("", "Max executable time: %s\n", *maxExecRunTime)
|
|
||||||
Config.MaxExecRunTime = *maxExecRunTime
|
|
||||||
}
|
|
||||||
|
|
||||||
/* If running as root, get ready to drop privileges */
|
|
||||||
if syscall.Getuid() == 0 || syscall.Getgid() == 0 {
|
|
||||||
log.Fatalf("", "Gophor does not support running as root!\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Enter server dir */
|
|
||||||
enterServerDir(*serverRoot)
|
|
||||||
Config.SysLog.Info("", "Entered server directory: %s\n", *serverRoot)
|
|
||||||
|
|
||||||
/* Setup listeners */
|
|
||||||
listeners := make([]*GophorListener, 0)
|
|
||||||
|
|
||||||
/* If requested, setup unencrypted listener */
|
|
||||||
if *serverPort != 0 {
|
|
||||||
/* If no forward port set, just use regular */
|
|
||||||
if *serverFwdPort == 0 {
|
|
||||||
*serverFwdPort = *serverPort
|
|
||||||
}
|
|
||||||
|
|
||||||
l, err := BeginGophorListen(*serverBindAddr, *serverHostname, strconv.Itoa(*serverPort), strconv.Itoa(*serverFwdPort), *serverRoot)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Error setting up (unencrypted) listener: %s\n", err.Error())
|
|
||||||
}
|
|
||||||
listeners = append(listeners, l)
|
|
||||||
} else {
|
|
||||||
log.Fatalf("No valid port to listen on\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Setup file cache */
|
|
||||||
Config.FileSystem = new(FileSystem)
|
|
||||||
|
|
||||||
/* Check if cache requested disabled */
|
|
||||||
if !*cacheDisabled {
|
|
||||||
/* Init file cache */
|
|
||||||
Config.FileSystem.Init(*cacheSize, *cacheFileSizeMax)
|
|
||||||
|
|
||||||
/* Before file monitor or any kind of new goroutines started,
|
|
||||||
* check if we need to cache generated policy files
|
|
||||||
*/
|
|
||||||
cachePolicyFiles(*serverRoot, *serverDescription, *serverAdmin, *serverGeoloc)
|
|
||||||
|
|
||||||
/* Start file cache freshness checker */
|
|
||||||
startFileMonitor(*fileMonitorFreq)
|
|
||||||
Config.SysLog.Info("", "File caching enabled with: maxcount=%d maxsize=%.3fMB checkfreq=%s\n", *cacheSize, *cacheFileSizeMax, *fileMonitorFreq)
|
|
||||||
} else {
|
|
||||||
/* File caching disabled, init with zero max size so nothing gets cached */
|
|
||||||
Config.FileSystem.Init(2, 0)
|
|
||||||
Config.SysLog.Info("", "File caching disabled\n")
|
|
||||||
|
|
||||||
/* Safe to cache policy files now */
|
|
||||||
cachePolicyFiles(*serverRoot, *serverDescription, *serverAdmin, *serverGeoloc)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Setup file restrictions and remappings */
|
|
||||||
Config.FileSystem.Restricted = compileUserRestrictedRegex(*restrictedFiles)
|
|
||||||
Config.FileSystem.Remaps = compileUserRemapRegex(*fileRemaps)
|
|
||||||
|
|
||||||
/* Precompile some helpful regex */
|
|
||||||
Config.RgxGophermap = compileGophermapCheckRegex()
|
|
||||||
Config.RgxCgiBin = compileCgiBinCheckRegex()
|
|
||||||
|
|
||||||
/* Return the created listeners slice :) */
|
|
||||||
return listeners
|
|
||||||
}
|
|
||||||
|
|
||||||
/* enterServerDir changes the process working directory to the server
 * root path, fatally exiting if the directory cannot be entered. */
func enterServerDir(path string) {
	if err := syscall.Chdir(path); err != nil {
		log.Fatalf("Error changing dir to server root %s: %s\n", path, err.Error())
	}
}
|
|
@ -1,22 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
/* generateHtmlRedirect builds a small HTML page that meta-refreshes the
 * browser to the supplied URL (with a manual fallback link), returned as
 * a byte slice ready for writing to the client. */
func generateHtmlRedirect(url string) []byte {
	segments := []string{
		"<html>\n",
		"<head>\n",
		"<meta http-equiv=\"refresh\" content=\"1;URL=" + url + "\">",
		"</head>\n",
		"<body>\n",
		"You are following an external link to a web site.\n",
		"You will be automatically taken to the site shortly.\n",
		"If you do not get sent there, please click <A HREF=\"" + url + "\">here</A> to go to the web site.\n",
		"<p>\n",
		"The URL linked is <A HREF=\"" + url + "\">" + url + "</A>\n",
		"<p>\n",
		"Thanks for using Gophor!\n",
		"</body>\n",
		"</html>\n",
	}

	page := make([]byte, 0)
	for _, segment := range segments {
		page = append(page, segment...)
	}
	return page
}
|
|
@ -1,209 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"bytes"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* HttpStripWriter wraps an io.Writer, buffering an initial prefix of the
 * stream so it can detect a leading HTTP header section (as emitted by
 * HTTP-style CGI scripts). A detected header section is stripped, and a
 * non-200 "Status:" header is translated into a GophorError; otherwise
 * the buffered bytes pass through unmodified.
 */
type HttpStripWriter struct {
	/* Wrapper to io.Writer that reads a predetermined amount into a buffer
	 * then parses the buffer for valid HTTP headers and status code, deciding
	 * whether to strip these headers or returning with an HTTP status code.
	 */
	Writer io.Writer     // underlying destination writer
	SkipBuffer []byte    // prefix buffer scanned for HTTP headers
	SkipIndex int        // number of bytes currently held in SkipBuffer
	Err *GophorError     // error parsed from a CGI "Status:" header, if any

	/* We set underlying write function with a variable, so that each call
	 * to .Write() doesn't have to perform a check every time whether we need
	 * to keep checking for headers to skip.
	 */
	WriteFunc func([]byte) (int, error)
}
|
|
||||||
|
|
||||||
func NewHttpStripWriter(writer io.Writer) *HttpStripWriter {
|
|
||||||
w := &HttpStripWriter{}
|
|
||||||
w.Writer = writer
|
|
||||||
w.WriteFunc = w.WriteCheckForHeaders
|
|
||||||
w.SkipBuffer = make([]byte, Config.SkipPrefixBufSize)
|
|
||||||
w.SkipIndex = 0
|
|
||||||
return w
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Size returns the total capacity of the header skip buffer. */
func (w *HttpStripWriter) Size() int {
	/* Size of the skip buffer */
	return len(w.SkipBuffer)
}
|
|
||||||
|
|
||||||
/* Available returns how many unused bytes remain in the skip buffer. */
func (w *HttpStripWriter) Available() int {
	/* How much space have we got left in the skip buffer */
	return w.Size() - w.SkipIndex
}
|
|
||||||
|
|
||||||
func (w *HttpStripWriter) AddToSkipBuffer(data []byte) int {
|
|
||||||
/* Figure out how much data we need to add */
|
|
||||||
toAdd := w.Available()
|
|
||||||
if len(data) < toAdd {
|
|
||||||
toAdd = len(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Add the data to the skip buffer! */
|
|
||||||
copy(w.SkipBuffer[w.SkipIndex:], data[:toAdd])
|
|
||||||
w.SkipIndex += toAdd
|
|
||||||
return toAdd
|
|
||||||
}
|
|
||||||
|
|
||||||
/* ParseHttpHeaderSection scans the skip buffer for an HTTP-style header
 * section. It returns (validHeaderSection, shouldContinue): whether the
 * buffer contains a "content-type:" line (making it a header section to
 * strip), and whether writing should continue (false once any non-200
 * "status:" header has been seen). A non-200 status also records the
 * corresponding CgiStatus error on w.Err. */
func (w *HttpStripWriter) ParseHttpHeaderSection() (bool, bool) {
	/* Check if this is a valid HTTP header section and determine from status if we should continue */
	validHeaderSection, shouldContinue := false, true
	for _, header := range bytes.Split(w.SkipBuffer, []byte(DOSLineEnd)) {
		/* Lowercase for case-insensitive header matching */
		header = bytes.ToLower(header)

		if bytes.Contains(header, []byte("content-type: ")) {
			/* This whole header section is now _valid_ */
			validHeaderSection = true
		} else if bytes.Contains(header, []byte("status: ")) {
			/* Try parse status code: first space-separated token after the prefix */
			statusStr := string(bytes.Split(bytes.TrimPrefix(header, []byte("status: ")), []byte(" "))[0])

			if statusStr == "200" {
				/* We ignore this */
				continue
			}

			/* Any other values indicate error, we should not continue writing */
			shouldContinue = false

			/* Try parse error code */
			errorCode := CgiStatusUnknownErr
			switch statusStr {
			case "400":
				errorCode = CgiStatus400Err
			case "401":
				errorCode = CgiStatus401Err
			case "403":
				errorCode = CgiStatus403Err
			case "404":
				errorCode = CgiStatus404Err
			case "408":
				errorCode = CgiStatus408Err
			case "410":
				errorCode = CgiStatus410Err
			case "500":
				errorCode = CgiStatus500Err
			case "501":
				errorCode = CgiStatus501Err
			case "503":
				errorCode = CgiStatus503Err
			}

			/* Set struct error */
			w.Err = &GophorError{ errorCode, nil }
		}
	}

	return validHeaderSection, shouldContinue
}
|
|
||||||
|
|
||||||
/* WriteSkipBuffer parses the buffered stream prefix for HTTP headers,
 * then either discards the prefix (valid header section: strip it) or
 * forwards it verbatim to the underlying writer. Returns whether writing
 * should continue, plus any write error. The buffer fill index is reset
 * in all cases. */
func (w *HttpStripWriter) WriteSkipBuffer() (bool, error) {
	/* Whatever happens below, the skip buffer is considered consumed */
	defer func() {
		w.SkipIndex = 0
	}()

	/* First try parse the headers, determine what to do next */
	validHeaders, shouldContinue := w.ParseHttpHeaderSection()

	if validHeaders {
		/* Valid headers, we don't bother writing. Return whether
		 * shouldContinue whatever value that may be.
		 */
		return shouldContinue, nil
	}

	/* Default is to write skip buffer contents. shouldContinue only
	 * means something as long as we have valid headers.
	 */
	_, err := w.Writer.Write(w.SkipBuffer[:w.SkipIndex])
	return true, err
}
|
|
||||||
|
|
||||||
/* FinishUp flushes any bytes still held in the skip buffer (possible when
 * total output was smaller than the buffer) through the header-stripping
 * path, then returns the CGI status error parsed during writing, if any. */
func (w *HttpStripWriter) FinishUp() *GophorError {
	/* If SkipBuffer still has contents, in case of data written being less
	 * than w.Size() --> check this data for HTTP headers to strip, parse
	 * any status codes and write this content with underlying writer if
	 * necessary.
	 */
	if w.SkipIndex > 0 {
		w.WriteSkipBuffer()
	}

	/* Return HttpStripWriter error code if set */
	return w.Err
}
|
|
||||||
|
|
||||||
|
|
||||||
/* Write passes data to whichever write implementation is currently
 * installed (header-checking at first, plain pass-through afterwards). */
func (w *HttpStripWriter) Write(data []byte) (int, error) {
	/* Write using whatever write function is currently set */
	return w.WriteFunc(data)
}
|
|
||||||
|
|
||||||
/* WriteRegular is the pass-through write implementation used once header
 * checking has finished: data goes straight to the underlying writer. */
func (w *HttpStripWriter) WriteRegular(data []byte) (int, error) {
	/* Regular write function */
	return w.Writer.Write(data)
}
|
|
||||||
|
|
||||||
/* WriteCheckForHeaders is the initial write implementation: it accumulates
 * the start of the stream in the skip buffer until either the buffer fills
 * or a blank line (CRLF CRLF -- the end of an HTTP header section) appears,
 * then parses/strips headers via WriteSkipBuffer and switches the writer to
 * plain pass-through mode. Returns io.EOF when a non-200 CGI status header
 * indicates writing should stop. */
func (w *HttpStripWriter) WriteCheckForHeaders(data []byte) (int, error) {
	split := bytes.Split(data, []byte(DOSLineEnd+DOSLineEnd))
	if len(split) == 1 {
		/* No header-terminating blank line yet: try add these to skip buffer */
		added := w.AddToSkipBuffer(data)

		if added < len(data) {
			/* Skip buffer is now full -- flush it and fall back to regular writes */
			defer func() {
				/* Having written skipbuffer after this if clause, set write to regular */
				w.WriteFunc = w.WriteRegular
			}()

			doContinue, err := w.WriteSkipBuffer()
			if !doContinue {
				return len(data), io.EOF
			} else if err != nil {
				return added, err
			}

			/* Write remaining data not added to skip buffer */
			count, err := w.Writer.Write(data[added:])
			if err != nil {
				return added+count, err
			}
		}

		return len(data), nil
	} else {
		/* Blank line found: everything before it is the header candidate */
		defer func() {
			/* No use for skip buffer after this clause, set write to regular */
			w.WriteFunc = w.WriteRegular
			w.SkipIndex = 0
		}()

		/* Try add what we can to skip buffer */
		added := w.AddToSkipBuffer(append(split[0], []byte(DOSLineEnd+DOSLineEnd)...))

		/* Write skip buffer data if necessary, check if we should continue */
		doContinue, err := w.WriteSkipBuffer()
		if !doContinue {
			return len(data), io.EOF
		} else if err != nil {
			return added, err
		}

		/* Write remaining data not added to skip buffer */
		count, err := w.Writer.Write(data[added:])
		if err != nil {
			return added+count, err
		}

		return len(data), nil
	}
}
|
|
@ -1,157 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	/* Log line prefixes, one per severity level */
	LogPrefixInfo = ": I :: "
	LogPrefixError = ": E :: "
	LogPrefixFatal = ": F :: "

	/* Log output types (accepted values of the -log-output flag) */
	LogDisabled = "disable"
	LogToStderr = "stderr"
	LogToFile = "file"

	/* Log options (comma-separated values of the -log-opts flag) */
	LogTimestamps = "timestamp"
	LogIps = "ip"
)
|
|
||||||
|
|
||||||
/* Defines a simple logger interface */
|
|
||||||
/* LoggerInterface defines a simple logger. Each method takes a prefix
 * string (e.g. a client address), a printf-style format string, then
 * the format arguments. */
type LoggerInterface interface {
	/* Info logs an informational message */
	Info(string, string, ...interface{})
	/* Error logs a non-fatal error */
	Error(string, string, ...interface{})
	/* Fatal logs a message then terminates the process */
	Fatal(string, string, ...interface{})
}
|
|
||||||
|
|
||||||
/* NullLogger is a LoggerInterface implementation that discards all
 * messages; used when logging is disabled. */
type NullLogger struct {}
func (l *NullLogger) Info(prefix, format string, args ...interface{}) {}
func (l *NullLogger) Error(prefix, format string, args ...interface{}) {}
func (l *NullLogger) Fatal(prefix, format string, args ...interface{}) {}
|
|
||||||
|
|
||||||
/* Logger is a basic LoggerInterface implementation that prepends both the
 * severity marker and the caller-supplied prefix to every message. */
type Logger struct {
	Logger *log.Logger
}

/* Info logs an informational message with severity marker and prefix. */
func (l *Logger) Info(prefix, format string, args ...interface{}) {
	l.Logger.Printf(LogPrefixInfo+prefix+format, args...)
}

/* Error logs an error message with severity marker and prefix. */
func (l *Logger) Error(prefix, format string, args ...interface{}) {
	l.Logger.Printf(LogPrefixError+prefix+format, args...)
}

/* Fatal logs a message with severity marker and prefix, then exits. */
func (l *Logger) Fatal(prefix, format string, args ...interface{}) {
	l.Logger.Fatalf(LogPrefixFatal+prefix+format, args...)
}
|
|
||||||
|
|
||||||
/* LoggerNoPrefix is a LoggerInterface implementation that drops the
 * caller-supplied prefix (e.g. when client IPs should not be logged),
 * keeping only the severity marker. */
type LoggerNoPrefix struct {
	Logger *log.Logger
}

/* Info logs an informational message, discarding the prefix argument. */
func (l *LoggerNoPrefix) Info(prefix, format string, args ...interface{}) {
	/* Ignore the prefix */
	l.Logger.Printf(LogPrefixInfo+format, args...)
}

/* Error logs an error message, discarding the prefix argument. */
func (l *LoggerNoPrefix) Error(prefix, format string, args ...interface{}) {
	/* Ignore the prefix */
	l.Logger.Printf(LogPrefixError+format, args...)
}

/* Fatal logs a message (prefix discarded) then exits the process. */
func (l *LoggerNoPrefix) Fatal(prefix, format string, args ...interface{}) {
	/* Ignore the prefix */
	l.Logger.Fatalf(LogPrefixFatal+format, args...)
}
|
|
||||||
|
|
||||||
/* setupGlobalLogger configures the stdlib default logger: output to
 * stderr with no timestamp/flag decoration. */
func setupGlobalLogger() {
	log.SetOutput(os.Stderr)
	log.SetFlags(0)
}
|
|
||||||
|
|
||||||
/* Setup the system and access logger interfaces according to supplied output options and logger options */
|
|
||||||
func setupLoggers(logOutput, logOpts, systemLogPath, accessLogPath string) (LoggerInterface, LoggerInterface) {
|
|
||||||
/* Parse the logger options */
|
|
||||||
logIps := false
|
|
||||||
logFlags := 0
|
|
||||||
for _, opt := range strings.Split(logOpts, ",") {
|
|
||||||
switch opt {
|
|
||||||
case "":
|
|
||||||
continue
|
|
||||||
|
|
||||||
case LogTimestamps:
|
|
||||||
logFlags = log.LstdFlags
|
|
||||||
|
|
||||||
case LogIps:
|
|
||||||
logIps = true
|
|
||||||
|
|
||||||
default:
|
|
||||||
log.Fatalf("Unrecognized log opt: %s\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Setup the loggers according to requested logging output */
|
|
||||||
switch logOutput {
|
|
||||||
case "":
|
|
||||||
/* Assume empty means stderr */
|
|
||||||
fallthrough
|
|
||||||
|
|
||||||
case LogToStderr:
|
|
||||||
/* Return two separate stderr loggers */
|
|
||||||
sysLogger := &LoggerNoPrefix{ NewLoggerToStderr(logFlags) }
|
|
||||||
if logIps {
|
|
||||||
return sysLogger, &Logger{ NewLoggerToStderr(logFlags) }
|
|
||||||
} else {
|
|
||||||
return sysLogger, &LoggerNoPrefix{ NewLoggerToStderr(logFlags) }
|
|
||||||
}
|
|
||||||
|
|
||||||
case LogDisabled:
|
|
||||||
/* Return two pointers to same null logger */
|
|
||||||
nullLogger := &NullLogger{}
|
|
||||||
return nullLogger, nullLogger
|
|
||||||
|
|
||||||
case LogToFile:
|
|
||||||
/* Return two separate file loggers */
|
|
||||||
sysLogger := &Logger{ NewLoggerToFile(systemLogPath, logFlags) }
|
|
||||||
if logIps {
|
|
||||||
return sysLogger, &Logger{ NewLoggerToFile(accessLogPath, logFlags) }
|
|
||||||
} else {
|
|
||||||
return sysLogger, &LoggerNoPrefix{ NewLoggerToFile(accessLogPath, logFlags) }
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
log.Fatalf("Unrecognised log output type: %s\n", logOutput)
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/* NewLoggerToStderr returns a standard library logger writing to stderr
 * with the given flags and no prefix. */
func NewLoggerToStderr(logFlags int) *log.Logger {
	logger := log.New(os.Stderr, "", logFlags)
	return logger
}
|
|
||||||
|
|
||||||
/* NewLoggerToFile returns a standard library logger appending to the file
 * at path (created 0600 if missing), fatally exiting if it cannot open. */
func NewLoggerToFile(path string, logFlags int) *log.Logger {
	writer, openErr := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
	if openErr != nil {
		log.Fatalf("Failed to create logger to file %s: %s\n", path, openErr.Error())
	}
	return log.New(writer, "", logFlags)
}
|
|
||||||
|
|
||||||
/* printVersionExit prints the version string (with default logger flags
 * reset so no timestamp is prepended) and terminates successfully. */
func printVersionExit() {
	/* Set the default logger flags before printing version */
	log.SetFlags(0)
	log.Printf("%s\n", GophorVersion)
	os.Exit(0)
}
|
|
@ -0,0 +1,9 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"gophor/gopher"
|
||||||
|
)
|
||||||
|
|
||||||
|
/* Entry point: delegates to the gopher package's server run loop. */
func main() {
	gopher.Run()
}
|
@ -1,154 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Parse a request string into a path and parameters string */
|
|
||||||
func parseGopherUrl(request string) (*GopherUrl, *GophorError) {
|
|
||||||
if strings.Contains(request, "#") || // we don't support fragments
|
|
||||||
strings.HasPrefix(request, "GET ") { // we don't support HTTP requests
|
|
||||||
return nil, &GophorError{ InvalidRequestErr, nil }
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if string contains any ASCII control byte */
|
|
||||||
for i := 0; i < len(request); i += 1 {
|
|
||||||
if request[i] < ' ' || request[i] == 0x7f {
|
|
||||||
return nil, &GophorError{ InvalidRequestErr, nil }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Split into 2 substrings by '?'. Url path and query */
|
|
||||||
split := strings.SplitN(request, "?", 2)
|
|
||||||
|
|
||||||
/* Unescape path */
|
|
||||||
path, err := url.PathUnescape(split[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, &GophorError{ InvalidRequestErr, nil }
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Return GopherUrl based on this split request */
|
|
||||||
if len(split) == 1 {
|
|
||||||
return &GopherUrl{ path, "" }, nil
|
|
||||||
} else {
|
|
||||||
return &GopherUrl{ path, split[1] }, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* parseLineType determines the gopher item type of a single gophermap
 * line from its first character, with special cases for empty lines,
 * single-character lines and lines containing no tab separator. */
func parseLineType(line string) ItemType {
	lineLen := len(line)

	if lineLen == 0 {
		/* An empty line is treated as an unprefixed info line */
		return TypeInfoNotStated
	} else if lineLen == 1 {
		/* The only accepted types for a length 1 line */
		switch ItemType(line[0]) {
		case TypeEnd:
			return TypeEnd
		case TypeEndBeginList:
			return TypeEndBeginList
		case TypeComment:
			return TypeComment
		case TypeInfo:
			return TypeInfo
		case TypeTitle:
			return TypeTitle
		default:
			return TypeUnknown
		}
	} else if !strings.Contains(line, string(Tab)) {
		/* The only accepted types for a line with no tabs */
		switch ItemType(line[0]) {
		case TypeComment:
			return TypeComment
		case TypeTitle:
			return TypeTitle
		case TypeInfo:
			return TypeInfo
		case TypeHiddenFile:
			return TypeHiddenFile
		case TypeSubGophermap:
			return TypeSubGophermap
		default:
			return TypeInfoNotStated
		}
	}

	/* Regular tab-separated line: the type is simply the first character */
	return ItemType(line[0])
}
|
|
||||||
|
|
||||||
/* parseLineRequestString parses a gophermap line's selector string into a
 * new Request. A leading '/' means relative to the server root; otherwise
 * the selector is relative to the current gophermap's directory. Paths
 * inside cgi-bin additionally get their '?' query parameters split out. */
func parseLineRequestString(requestPath *RequestPath, lineStr string) (*Request, *GophorError) {
	if strings.HasPrefix(lineStr, "/") {
		/* Assume is absolute (well, seeing server root as '/') */
		if withinCgiBin(lineStr[1:]) {
			/* CGI script, parse request path and parameters */
			url, gophorErr := parseGopherUrl(lineStr[1:])
			if gophorErr != nil {
				return nil, gophorErr
			} else {
				return &Request{ NewRequestPath(requestPath.RootDir(), url.Path), url.Parameters }, nil
			}
		} else {
			/* Regular file, no more parsing */
			return &Request{ NewRequestPath(requestPath.RootDir(), lineStr[1:]), "" }, nil
		}
	} else {
		/* Assume relative to current directory */
		if withinCgiBin(lineStr) && requestPath.Relative() == "" {
			/* If begins with cgi-bin and is at root dir, parse as cgi-bin */
			url, gophorErr := parseGopherUrl(lineStr)
			if gophorErr != nil {
				return nil, gophorErr
			} else {
				return &Request{ NewRequestPath(requestPath.RootDir(), url.Path), url.Parameters }, nil
			}
		} else {
			/* Regular file, no more parsing */
			return &Request{ NewRequestPath(requestPath.RootDir(), requestPath.JoinCurDir(lineStr)), "" }, nil
		}
	}
}
|
|
||||||
|
|
||||||
/* splitStringByRune splits str on separator rune r, honouring backslash
 * as an escape character for both the separator and itself. Any other
 * escaped character is kept together with its backslash. Always returns
 * at least one element. */
func splitStringByRune(str string, r rune) []string {
	parts := make([]string, 0)
	var cur strings.Builder
	escaped := false

	for _, c := range str {
		switch {
		case escaped && c == r:
			/* Escaped separator: literal character, not a split point */
			cur.WriteRune(c)
			escaped = false
		case c == r:
			/* Unescaped separator: close out the current part */
			parts = append(parts, cur.String())
			cur.Reset()
		case escaped && c == '\\':
			/* Escaped backslash: a single literal backslash */
			cur.WriteRune(c)
			escaped = false
		case c == '\\':
			/* Start of an escape sequence */
			escaped = true
		case escaped:
			/* Backslash before an ordinary rune: keep both */
			cur.WriteByte('\\')
			cur.WriteRune(c)
			escaped = false
		default:
			cur.WriteRune(c)
		}
	}

	/* Flush the trailing part; an empty input still yields one element */
	if cur.Len() > 0 || len(parts) == 0 {
		parts = append(parts, cur.String())
	}

	return parts
}
|
|
@ -1,96 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	/* Filenames of the server policy files that may be auto-generated
	 * when not present on disk */
	CapsTxtStr = "caps.txt"
	RobotsTxtStr = "robots.txt"
)
|
|
||||||
|
|
||||||
func cachePolicyFiles(rootDir, description, admin, geoloc string) {
|
|
||||||
/* See if caps txt exists, if not generate */
|
|
||||||
_, err := os.Stat(path.Join(rootDir, CapsTxtStr))
|
|
||||||
if err != nil {
|
|
||||||
/* We need to generate the caps txt and manually load into cache */
|
|
||||||
content := generateCapsTxt(description, admin, geoloc)
|
|
||||||
|
|
||||||
/* Create new file object from generated file contents */
|
|
||||||
fileContents := &GeneratedFileContents{ content }
|
|
||||||
file := &File{ fileContents, sync.RWMutex{}, true, 0 }
|
|
||||||
|
|
||||||
/* Trigger a load contents just to set it as fresh etc */
|
|
||||||
file.CacheContents()
|
|
||||||
|
|
||||||
/* No need to worry about mutexes here, no other goroutines running yet */
|
|
||||||
Config.FileSystem.CacheMap.Put(rootDir+"/"+CapsTxtStr, file)
|
|
||||||
Config.SysLog.Info("", "Generated policy file: %s\n", rootDir+"/"+CapsTxtStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* See if robots txt exists, if not generate */
|
|
||||||
_, err = os.Stat(rootDir+"/"+RobotsTxtStr)
|
|
||||||
if err != nil {
|
|
||||||
/* We need to generate the robots txt and manually load into cache */
|
|
||||||
content := generateRobotsTxt()
|
|
||||||
|
|
||||||
/* Create new file object from generated file contents */
|
|
||||||
fileContents := &GeneratedFileContents{ content }
|
|
||||||
file := &File{ fileContents, sync.RWMutex{}, true, 0 }
|
|
||||||
|
|
||||||
/* Trigger a load contents just to set it as fresh etc */
|
|
||||||
file.CacheContents()
|
|
||||||
|
|
||||||
/* No need to worry about mutexes here, no other goroutines running yet */
|
|
||||||
Config.FileSystem.CacheMap.Put(rootDir+"/"+RobotsTxtStr, file)
|
|
||||||
Config.SysLog.Info("", "Generated policy file: %s\n", rootDir+"/"+RobotsTxtStr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func generatePolicyHeader(filename string) string {
|
|
||||||
text := "# This is an automatically generated"+DOSLineEnd
|
|
||||||
text += "# server policy file: "+filename+DOSLineEnd
|
|
||||||
text += "#"+DOSLineEnd
|
|
||||||
text += "# Eat the rich ~GophorDev"+DOSLineEnd
|
|
||||||
return text
|
|
||||||
}
|
|
||||||
|
|
||||||
/* generateCapsTxt builds the contents of an auto-generated caps.txt
 * (gopher server capability description) using the supplied description,
 * admin contact and geolocation strings. */
func generateCapsTxt(description, admin, geoloc string) []byte {
	text := "CAPS"+DOSLineEnd
	text += DOSLineEnd
	text += generatePolicyHeader(CapsTxtStr)
	text += DOSLineEnd
	text += "CapsVersion=1"+DOSLineEnd
	text += "ExpireCapsAfter=1800"+DOSLineEnd
	text += DOSLineEnd
	/* NOTE(review): "PathDelimeter"/"PathKeepPreDelimeter" look like the key
	 * spellings expected by caps.txt consumers -- confirm against the caps.txt
	 * convention before "correcting" them. */
	text += "PathDelimeter=/"+DOSLineEnd
	text += "PathIdentity=."+DOSLineEnd
	text += "PathParent=.."+DOSLineEnd
	text += "PathParentDouble=FALSE"+DOSLineEnd
	text += "PathEscapeCharacter=\\"+DOSLineEnd
	text += "PathKeepPreDelimeter=FALSE"+DOSLineEnd
	text += DOSLineEnd
	text += "ServerSoftware=Gophor"+DOSLineEnd
	text += "ServerSoftwareVersion="+GophorVersion+DOSLineEnd
	text += "ServerDescription="+description+DOSLineEnd
	text += "ServerGeolocationString="+geoloc+DOSLineEnd
	// text += "ServerDefaultEncoding=ascii"+DOSLineEnd
	text += DOSLineEnd
	text += "ServerAdmin="+admin+DOSLineEnd
	return []byte(text)
}
|
|
||||||
|
|
||||||
func generateRobotsTxt() []byte {
|
|
||||||
text := generatePolicyHeader(RobotsTxtStr)
|
|
||||||
text += DOSLineEnd
|
|
||||||
text += "Usage-agent: *"+DOSLineEnd
|
|
||||||
text += "Disallow: *"+DOSLineEnd
|
|
||||||
text += DOSLineEnd
|
|
||||||
text += "Crawl-delay: 99999"+DOSLineEnd
|
|
||||||
text += DOSLineEnd
|
|
||||||
text += "# This server does not support scraping"+DOSLineEnd
|
|
||||||
return []byte(text)
|
|
||||||
}
|
|
@ -1,94 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"log"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	/* Separator between the request-path alias and its on-disk
	 * target in user supplied file remap entries; entries are split
	 * on exactly this string (see compileUserRemapRegex). */
	FileRemapSeparatorStr = " -> "
)
|
|
||||||
|
|
||||||
/* FileRemap pairs a compiled request-path regex with a replacement
 * template: requests matching Regex are served from the path produced
 * by expanding Template.
 */
type FileRemap struct {
	Regex *regexp.Regexp
	Template string
}
|
|
||||||
|
|
||||||
/* compileGophermapCheckRegex pre-compiles the regex used to recognise
 * gophermap files: it matches "gophermap" itself, any path ending in
 * "/gophermap", and any filename ending in ".gophermap".
 */
func compileGophermapCheckRegex() *regexp.Regexp {
	const gophermapPattern = `^(|.+/|.+\.)gophermap$`
	return regexp.MustCompile(gophermapPattern)
}
|
|
||||||
|
|
||||||
/* compileCgiBinCheckRegex pre-compiles the regex used to detect
 * requests inside the cgi-bin directory: it matches "cgi-bin" itself
 * and any path beneath "cgi-bin/".
 */
func compileCgiBinCheckRegex() *regexp.Regexp {
	const cgiBinPattern = `^cgi-bin(|/.*)$`
	return regexp.MustCompile(cgiBinPattern)
}
|
|
||||||
|
|
||||||
/* Compile a user supplied new line separated list of regex statements */
|
|
||||||
func compileUserRestrictedRegex(restrictions string) []*regexp.Regexp {
|
|
||||||
/* Return slice */
|
|
||||||
restrictedRegex := make([]*regexp.Regexp, 0)
|
|
||||||
|
|
||||||
/* Split the user supplied regex statements by new line */
|
|
||||||
for _, expr := range strings.Split(restrictions, "\n") {
|
|
||||||
/* Empty expression, skip */
|
|
||||||
if len(expr) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Try compile regex */
|
|
||||||
regex, err := regexp.Compile(expr)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed compiling user supplied regex: %s\n", expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Append restricted */
|
|
||||||
restrictedRegex = append(restrictedRegex, regex)
|
|
||||||
Config.SysLog.Info("", "Compiled restricted: %s\n", expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
return restrictedRegex
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Compile a user supplied new line separated list of file remap regex statements */
|
|
||||||
func compileUserRemapRegex(remaps string) []*FileRemap {
|
|
||||||
/* Return slice */
|
|
||||||
fileRemaps := make([]*FileRemap, 0)
|
|
||||||
|
|
||||||
/* Split the user supplied regex statements by new line */
|
|
||||||
for _, expr := range strings.Split(remaps, "\n") {
|
|
||||||
/* Empty expression, skip */
|
|
||||||
if len(expr) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Split into alias and remap string (MUST BE LENGTH 2) */
|
|
||||||
split := strings.Split(expr, FileRemapSeparatorStr)
|
|
||||||
if len(split) != 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Try compile regex */
|
|
||||||
regex, err := regexp.Compile("(?m)"+strings.TrimPrefix(split[0], "/")+"$")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("Failed compiling user supplied regex: %s\n", expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Append file remapper */
|
|
||||||
fileRemaps = append(fileRemaps, &FileRemap{ regex, strings.TrimPrefix(split[1], "/") })
|
|
||||||
Config.SysLog.Info("", "Compiled remap: %s\n", expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fileRemaps
|
|
||||||
}
|
|
||||||
|
|
||||||
/* isGophermap reports whether the supplied file path names a
 * gophermap, using the precompiled regex held in the global
 * server config. */
func isGophermap(path string) bool {
	return Config.RgxGophermap.MatchString(path)
}
|
|
||||||
|
|
||||||
/* withinCgiBin reports whether the supplied file path lies inside
 * the cgi-bin directory, using the precompiled regex held in the
 * global server config. */
func withinCgiBin(path string) bool {
	return Config.RgxCgiBin.MatchString(path)
}
|
|
@ -1,130 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type RequestPath struct {
	/* Path structure to allow hosts at
	 * different roots while maintaining relative
	 * and absolute path names for filesystem reading
	 */

	Root string   /* server root directory this path is anchored at */
	Rel string    /* request path relative to Root */
	Abs string    /* absolute filesystem path (Root joined with Rel) */
	Select string /* client-facing selector; initially Rel, kept at the
	               * original relative path after a remap (see RemapPath) */
}
|
|
||||||
|
|
||||||
func NewRequestPath(rootDir, relPath string) *RequestPath {
|
|
||||||
return &RequestPath{ rootDir, relPath, path.Join(rootDir, strings.TrimSuffix(relPath, "/")), relPath }
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rp *RequestPath) RemapPath(newPath string) *RequestPath {
|
|
||||||
requestPath := NewRequestPath(rp.RootDir(), sanitizeRawPath(rp.RootDir(), newPath))
|
|
||||||
requestPath.Select = rp.Relative()
|
|
||||||
return requestPath
|
|
||||||
}
|
|
||||||
|
|
||||||
/* RootDir returns the server root directory this path is anchored at */
func (rp *RequestPath) RootDir() string {
	return rp.Root
}

/* Relative returns the request path relative to the root directory */
func (rp *RequestPath) Relative() string {
	return rp.Rel
}

/* Absolute returns the absolute filesystem path */
func (rp *RequestPath) Absolute() string {
	return rp.Abs
}
|
|
||||||
|
|
||||||
func (rp *RequestPath) Selector() string {
|
|
||||||
if rp.Select == "." {
|
|
||||||
return "/"
|
|
||||||
} else {
|
|
||||||
return "/"+rp.Select
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* JoinRel joins extPath onto the relative path */
func (rp *RequestPath) JoinRel(extPath string) string {
	return path.Join(rp.Relative(), extPath)
}

/* JoinAbs joins extPath onto the absolute path */
func (rp *RequestPath) JoinAbs(extPath string) string {
	return path.Join(rp.Absolute(), extPath)
}

/* JoinSelector joins extPath onto the client-facing selector */
func (rp *RequestPath) JoinSelector(extPath string) string {
	return path.Join(rp.Selector(), extPath)
}
|
|
||||||
|
|
||||||
/* HasAbsPrefix reports whether the absolute path starts with prefix */
func (rp *RequestPath) HasAbsPrefix(prefix string) bool {
	return strings.HasPrefix(rp.Absolute(), prefix)
}

/* HasRelPrefix reports whether the relative path starts with prefix */
func (rp *RequestPath) HasRelPrefix(prefix string) bool {
	return strings.HasPrefix(rp.Relative(), prefix)
}

/* HasRelSuffix reports whether the relative path ends with suffix */
func (rp *RequestPath) HasRelSuffix(suffix string) bool {
	return strings.HasSuffix(rp.Relative(), suffix)
}

/* HasAbsSuffix reports whether the absolute path ends with suffix */
func (rp *RequestPath) HasAbsSuffix(suffix string) bool {
	return strings.HasSuffix(rp.Absolute(), suffix)
}
|
|
||||||
|
|
||||||
/* TrimRelSuffix returns the relative path with suffix removed, then
 * any trailing '/' removed as well */
func (rp *RequestPath) TrimRelSuffix(suffix string) string {
	return strings.TrimSuffix(strings.TrimSuffix(rp.Relative(), suffix), "/")
}

/* TrimAbsSuffix returns the absolute path with suffix removed, then
 * any trailing '/' removed as well */
func (rp *RequestPath) TrimAbsSuffix(suffix string) string {
	return strings.TrimSuffix(strings.TrimSuffix(rp.Absolute(), suffix), "/")
}
|
|
||||||
|
|
||||||
/* JoinCurDir joins extPath onto the directory containing the
 * relative path */
func (rp *RequestPath) JoinCurDir(extPath string) string {
	return path.Join(path.Dir(rp.Relative()), extPath)
}

/* JoinRootDir joins extPath onto the server root directory */
func (rp *RequestPath) JoinRootDir(extPath string) string {
	return path.Join(rp.RootDir(), extPath)
}
|
|
||||||
|
|
||||||
type Request struct {
	/* Holds onto a request path to the filesystem and
	 * a string slice of parsed parameters (usually nil
	 * or length 1)
	 */

	Path *RequestPath     /* sanitized filesystem path for the request */
	Parameters string     /* raw parameter string parsed from the request URL */
}
|
|
||||||
|
|
||||||
func NewSanitizedRequest(rootDir string, url *GopherUrl) *Request {
|
|
||||||
return &Request{
|
|
||||||
NewRequestPath(
|
|
||||||
rootDir,
|
|
||||||
sanitizeRawPath(rootDir, url.Path),
|
|
||||||
),
|
|
||||||
url.Parameters,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* sanitizeRawPath cleans a raw request path and normalizes it to a
 * path relative to rootDir: absolute paths have the root directory
 * (and any leading '/') stripped, while relative paths attempting to
 * traverse above the root ("..") collapse to the root itself (""). */
func sanitizeRawPath(rootDir, relPath string) string {
	/* Start with a clean :) */
	cleaned := path.Clean(relPath)

	switch {
	case path.IsAbs(cleaned):
		/* Absolute: try trimming root prefix, then any leading '/' */
		cleaned = strings.TrimPrefix(cleaned, rootDir)
		cleaned = strings.TrimPrefix(cleaned, "/")
	case strings.HasPrefix(cleaned, ".."):
		/* Relative back-dir traversal: clamp them to root */
		cleaned = ""
	}

	return cleaned
}
|
|
@ -1,54 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Responder bundles everything needed to answer a single request:
 * the buffered client connection, host and client details, and the
 * parsed request itself. */
type Responder struct {
	Conn *BufferedDeadlineConn
	Host *ConnHost
	Client *ConnClient
	Request *Request
}
|
|
||||||
|
|
||||||
func NewResponder(conn *BufferedDeadlineConn, host *ConnHost, client *ConnClient, request *Request) *Responder {
|
|
||||||
return &Responder{ conn, host, client, request }
|
|
||||||
}
|
|
||||||
|
|
||||||
/* AccessLogInfo writes an info line to the access log, prefixed
 * with the client's IP address */
func (r *Responder) AccessLogInfo(format string, args ...interface{}) {
	Config.AccLog.Info("("+r.Client.Ip()+") ", format, args...)
}

/* AccessLogError writes an error line to the access log, prefixed
 * with the client's IP address */
func (r *Responder) AccessLogError(format string, args ...interface{}) {
	Config.AccLog.Error("("+r.Client.Ip()+") ", format, args...)
}
|
|
||||||
|
|
||||||
/* Write implements io.Writer by writing directly to the underlying
 * buffered connection */
func (r *Responder) Write(b []byte) (int, error) {
	return r.Conn.Write(b)
}
|
|
||||||
|
|
||||||
func (r *Responder) WriteData(data []byte) *GophorError {
|
|
||||||
err := r.Conn.WriteData(data)
|
|
||||||
if err != nil {
|
|
||||||
return &GophorError{ SocketWriteErr, err }
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Responder) WriteRaw(reader io.Reader) *GophorError {
|
|
||||||
err := r.Conn.WriteRaw(reader)
|
|
||||||
if err != nil {
|
|
||||||
return &GophorError{ SocketWriteRawErr, err }
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Responder) CloneWithRequest(request *Request) *Responder {
|
|
||||||
/* Create new copy of Responder only with request differring */
|
|
||||||
return &Responder{
|
|
||||||
r.Conn,
|
|
||||||
r.Host,
|
|
||||||
r.Client,
|
|
||||||
request,
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,71 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* Worker holds the per-connection state needed to serve one
 * accepted client connection. */
type Worker struct {
	Conn *BufferedDeadlineConn
	Host *ConnHost
	Client *ConnClient
	RootDir string
}
|
|
||||||
|
|
||||||
/* Serve reads a single request line from the connection and answers
 * it: "URL:" selectors get an HTML redirect, everything else is
 * parsed and dispatched through the filesystem request handler. Any
 * failure is logged and, where possible, reported back to the client
 * as a gopher error response. The connection is always closed on
 * return. */
func (worker *Worker) Serve() {
	defer worker.Conn.Close()

	line, err := worker.Conn.ReadLine()
	if err != nil {
		Config.SysLog.Error("", "Error reading from socket port %s: %s\n", worker.Host.Port(), err.Error())
		return
	}

	/* Drop up to first tab — ignore anything after it */
	received := strings.Split(string(line), Tab)[0]

	/* Handle URL request if presented. If TrimPrefix shortened the
	 * string by exactly len("URL:") == 4, the prefix was present. */
	lenBefore := len(received)
	received = strings.TrimPrefix(received, "URL:")
	switch len(received) {
	case lenBefore-4:
		/* Send an HTML redirect to supplied URL */
		Config.AccLog.Info("("+worker.Client.Ip()+") ", "Redirecting to %s\n", received)
		worker.Conn.Write(generateHtmlRedirect(received))
		return
	default:
		/* Do nothing — not a URL selector */
	}

	/* Create GopherUrl object from request string */
	url, gophorErr := parseGopherUrl(received)
	if gophorErr == nil {
		/* Create new request from url object */
		request := NewSanitizedRequest(worker.RootDir, url)

		/* Create new responder from request */
		responder := NewResponder(worker.Conn, worker.Host, worker.Client, request)

		/* Handle request with supplied responder */
		gophorErr = Config.FileSystem.HandleRequest(responder)
		if gophorErr == nil {
			/* Log success to access and return! */
			responder.AccessLogInfo("Served: %s\n", request.Path.Absolute())
			return
		} else {
			/* Log failure to access; fall through to error reporting */
			responder.AccessLogError("Failed to serve: %s\n", request.Path.Absolute())
		}
	}

	/* Reached only with a non-nil gophorErr (parse or handler
	 * failure) — log it to the system log */
	Config.SysLog.Error("", gophorErr.Error())

	/* Generate response bytes from error code */
	errResponse := generateGopherErrorResponseFromCode(gophorErr.Code)

	/* If we got response bytes to send? SEND 'EM! */
	if errResponse != nil {
		/* No gods. No masters. We don't care about error checking here */
		worker.Conn.WriteData(errResponse)
	}
}
|
|
Loading…
Reference in New Issue