reuse buffered io objects + byte slices where possible using sync pools

Signed-off-by: kim (grufwub) <grufwub@gmail.com>
master
kim (grufwub) 4 years ago
parent 5c20543c74
commit b311d2036c

@ -0,0 +1,112 @@
package core
import (
"bufio"
"io"
"sync"
)
// Pools of reusable buffered I/O objects and byte slices, populated
// during setup (ParseFlagsAndSetup assigns them via the newXxxPool
// constructors below). NOTE(review): these are zero-value pools until
// setup runs; calling a getXxx helper before then returns nil from
// Get and panics on the type assertion — confirm setup always runs
// before any connection/file handling.
var (
	// connBufferedReaderPool recycles *bufio.Reader values for connection reads
	connBufferedReaderPool sync.Pool
	// connBufferedWriterPool recycles *bufio.Writer values for connection writes
	connBufferedWriterPool sync.Pool
	// fileBufferedReaderPool recycles *bufio.Reader values for file reads
	fileBufferedReaderPool sync.Pool
	// fileReadBufferPool recycles fixed-size []byte scratch buffers for file reads
	fileReadBufferPool sync.Pool
)
func newConnBufferedReaderPool(size int) sync.Pool {
return sync.Pool{
New: func() interface{} {
return bufio.NewReaderSize(nil, size)
},
}
}
func newConnBufferedWriterPool(size int) sync.Pool {
return sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, size)
},
}
}
// newFileBufferedReaderPool builds a sync.Pool that hands out
// *bufio.Reader values sized for file reads. As with the conn pools,
// readers start detached and are Reset onto a file before use.
func newFileBufferedReaderPool(size int) sync.Pool {
	newReader := func() interface{} {
		return bufio.NewReaderSize(nil, size)
	}
	return sync.Pool{New: newReader}
}
// newFileReadBufferPool builds a sync.Pool of fixed-size []byte scratch
// buffers used when reading files.
// NOTE(review): pooling a bare []byte boxes the slice header on every
// Put (staticcheck SA6002); tolerable here since the buffers themselves
// (the large allocation) are still reused.
func newFileReadBufferPool(size int) sync.Pool {
	newBuffer := func() interface{} {
		buf := make([]byte, size)
		return buf
	}
	return sync.Pool{New: newBuffer}
}
// getConnBufferedReader fetches a pooled *bufio.Reader and points it at
// r. Pair each call with putConnBufferedReader when done.
func getConnBufferedReader(r io.Reader) *bufio.Reader {
	reader := connBufferedReaderPool.Get().(*bufio.Reader)
	reader.Reset(r)
	return reader
}
// putConnBufferedReader detaches br from its current source (so the
// underlying conn can be garbage collected) and returns it to the pool.
func putConnBufferedReader(br *bufio.Reader) {
	br.Reset(nil)
	connBufferedReaderPool.Put(br)
}
// getConnBufferedWriter fetches a pooled *bufio.Writer and points it at
// w. Pair each call with putConnBufferedWriter when done.
func getConnBufferedWriter(w io.Writer) *bufio.Writer {
	writer := connBufferedWriterPool.Get().(*bufio.Writer)
	writer.Reset(w)
	return writer
}
// putConnBufferedWriter detaches bw from its current destination (so
// the underlying conn can be garbage collected) and returns it to the
// pool. Callers are expected to Flush before putting back.
func putConnBufferedWriter(bw *bufio.Writer) {
	bw.Reset(nil)
	connBufferedWriterPool.Put(bw)
}
// getFileBufferedReader fetches a pooled *bufio.Reader and points it at
// r (typically an *os.File). Pair with putFileBufferedReader when done.
func getFileBufferedReader(r io.Reader) *bufio.Reader {
	reader := fileBufferedReaderPool.Get().(*bufio.Reader)
	reader.Reset(r)
	return reader
}
// putFileBufferedReader detaches br from its current source (so the
// underlying file can be garbage collected) and returns it to the pool.
func putFileBufferedReader(br *bufio.Reader) {
	br.Reset(nil)
	fileBufferedReaderPool.Put(br)
}
// getFileReadBuffer fetches a pooled fixed-size []byte scratch buffer
// for file reads. Contents may hold stale data from a previous use;
// callers must only trust the bytes they read into it.
func getFileReadBuffer() []byte {
	buf := fileReadBufferPool.Get().([]byte)
	return buf
}
// putFileReadBuffer returns a scratch buffer to the pool for reuse.
// NOTE(review): putting a bare []byte boxes the slice header each call
// (staticcheck SA6002); switching the pool to *[]byte would avoid it
// but requires changing getFileReadBuffer in lockstep.
func putFileReadBuffer(b []byte) {
	fileReadBufferPool.Put(b)
}

@ -14,12 +14,6 @@ var (
// connWriteDeadline specifies the connection write deadline
connWriteDeadline time.Duration
// connReadBufSize specifies the connection read buffer size
connReadBufSize int
// connWriteBufSize specifies the connection write buffer size
connWriteBufSize int
// connReadMax specifies the connection read max (in bytes)
connReadMax int
)
@ -48,18 +42,19 @@ func (c *deadlineConn) Close() error {
// Conn wraps a DeadlineConn with a buffer
type conn struct {
buf *bufio.ReadWriter
cl io.Closer
br *bufio.Reader
bw *bufio.Writer
cl io.Closer
}
// wrapConn wraps a net.Conn in DeadlineConn, then within Conn and returns the result
// wrapConn wraps a net.Conn in deadlineConn, then within conn and returns the result
func wrapConn(c net.Conn) *conn {
deadlineConn := &deadlineConn{c}
buf := bufio.NewReadWriter(
bufio.NewReaderSize(deadlineConn, connReadBufSize),
bufio.NewWriterSize(deadlineConn, connWriteBufSize),
)
return &conn{buf, deadlineConn}
return &conn{
br: getConnBufferedReader(deadlineConn),
bw: getConnBufferedWriter(deadlineConn),
cl: deadlineConn,
}
}
// ReadLine reads a single line and returns the result, or nil and error
@ -70,7 +65,7 @@ func (c *conn) ReadLine() ([]byte, Error) {
// Read!
for len(b) < connReadMax {
// read the line
line, isPrefix, err := c.buf.ReadLine()
line, isPrefix, err := c.br.ReadLine()
if err != nil {
return nil, WrapError(ConnReadErr, err)
}
@ -89,7 +84,7 @@ func (c *conn) ReadLine() ([]byte, Error) {
// Write writes a byte slice to the buffer and returns error status
func (c *conn) Write(b []byte) Error {
_, err := c.buf.Write(b)
_, err := c.bw.Write(b)
if err != nil {
return WrapError(ConnWriteErr, err)
}
@ -102,7 +97,7 @@ func (c *conn) ReadFrom(r io.Reader) Error {
// a ReadFrom method implemented, it will force the buffer to
// use it's own internal byte buffer along with the deadlineConn's
// Write implementation (forcing the deadline to be regularly updated)
_, err := c.buf.ReadFrom(r)
_, err := c.bw.ReadFrom(r)
if err != nil {
return WrapError(ConnWriteErr, err)
}
@ -111,15 +106,26 @@ func (c *conn) ReadFrom(r io.Reader) Error {
// Writer returns the underlying buffer wrapped conn writer
func (c *conn) Writer() io.Writer {
return c.buf.Writer
return c.bw
}
// Close flushes the underlying buffer then closes the conn
// Close flushes the underlying buffer, closes the conn then puts
// the sync.Pool conn buffers back
func (c *conn) Close() Error {
err := c.buf.Flush()
err = c.cl.Close()
if err != nil {
return WrapError(ConnCloseErr, err)
// Flush + close
err1 := c.bw.Flush()
err2 := c.cl.Close()
// Put buffers back
putConnBufferedReader(c.br)
putConnBufferedWriter(c.bw)
// If either errors, wrap. Else return none
if err2 != nil {
return WrapError(ConnCloseErr, err2)
}
if err1 != nil {
return WrapError(ConnWriteErr, err1)
}
return nil
}

@ -1,7 +1,6 @@
package core
import (
"bufio"
"io"
"os"
"sort"
@ -103,9 +102,9 @@ func (fs *FileSystemObject) ReadFile(fd *os.File) ([]byte, Error) {
// Return slice
ret := make([]byte, 0)
// Read buffer
buf := make([]byte, fileReadBufSize)
rd := bufio.NewReaderSize(fd, fileReadBufSize)
// Read buffers
buf := getFileReadBuffer()
rd := getFileBufferedReader(fd)
// Read through file until null bytes / error
for {
@ -124,13 +123,18 @@ func (fs *FileSystemObject) ReadFile(fd *os.File) ([]byte, Error) {
}
}
// Put back buffers
putFileReadBuffer(buf)
putFileBufferedReader(rd)
// Return!
return ret, nil
}
// ScanFile scans a supplied file at file descriptor, using iterator function
func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Error {
// Buffered reader
rd := bufio.NewReaderSize(fd, fileReadBufSize)
// Read buffers
rd := getFileBufferedReader(fd)
// Iterate through file!
for {
@ -168,6 +172,10 @@ func (fs *FileSystemObject) ScanFile(fd *os.File, iterator func(string) bool) Er
b = nil
}
// Put back buffers
putFileBufferedReader(rd)
// Return no errors :)
return nil
}

@ -101,11 +101,14 @@ func ParseFlagsAndSetup(proto string, errorMessageFunc func(ErrorCode) string) {
SystemLog.Fatal(listenerBeginFailStr, protocol, Hostname, FwdPort, BindAddr, Port, err.Error())
}
// Host buffer sizes
connReadBufSize = int(*cReadBuf)
connWriteBufSize = int(*cWriteBuf)
// Setup the sync pools
connBufferedReaderPool = newConnBufferedReaderPool(int(*cReadBuf))
connBufferedWriterPool = newConnBufferedWriterPool(int(*cWriteBuf))
fileBufferedReaderPool = newFileBufferedReaderPool(int(*fReadBuf))
fileReadBufferPool = newFileReadBufferPool(int(*fReadBuf))
// Conn read max
connReadMax = int(*cReadMax)
fileReadBufSize = int(*fReadBuf)
// FileSystemObject (and related) setup
fileSizeMax = int64(1048576.0 * *cacheMax) // gets megabytes value in bytes

@ -38,9 +38,16 @@ func serve(client *core.Client) {
// Handle the request!
err = core.FileSystem.HandleClient(
// Current client
client,
// Current request
request,
// New file contents function
newFileContents,
// Handle directory function
func(fs *core.FileSystemObject, client *core.Client, fd *os.File, p *core.Path) core.Error {
// First check for gophermap, create gophermap Path object
gophermap := p.JoinPath("gophermap")
@ -50,6 +57,8 @@ func serve(client *core.Client) {
if err == nil {
stat, osErr := fd2.Stat()
if osErr == nil {
// Fetch gophermap and defer close
defer fd2.Close()
return fs.FetchFile(client, fd2, stat, gophermap, newFileContents)
}
@ -66,8 +75,13 @@ func serve(client *core.Client) {
// Scan directory and build lines
err = fs.ScanDirectory(
// Directory fd
fd,
// Directory path
p,
// Iter function
func(file os.FileInfo, fp *core.Path) {
// Append new formatted file listing (if correct type)
dirContents = appendFileListing(dirContents, file, fp)

Loading…
Cancel
Save