// Plugin is a type which holds information about a plugin.
type Plugin struct {
	// ServerType is the type of server this plugin is for.
	// Can be empty if not applicable, or if the plugin
	// can associate with any server type.
	ServerType string

	// Action is the plugin's setup function, if associated
	// with a directive in the Caddyfile.
	Action SetupFunc
}
// plugins is a map of server type to map of plugin name to
// Plugin. These are the "general" plugins that may or may
// not be associated with a specific server type. If it's
// applicable to multiple server types or the server type is
// irrelevant, the key is empty string (""). But all plugins
// must have a name.
plugins = make(map[string]map[string]Plugin)
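A hedged sketch of how an entry lands in this map: caddy.RegisterPlugin is the real registration hook in Caddy v1 (CoreDNS vendors its own fork, so the import path varies), but the plugin name and setup function below are illustrative placeholders.

package example

import "github.com/caddyserver/caddy" // import path varies by Caddy/CoreDNS fork

func init() {
	// Registers under plugins["dns"]["example"]; an empty ServerType
	// would register under plugins[""]["example"] instead.
	caddy.RegisterPlugin("example", caddy.Plugin{
		ServerType: "dns",
		Action:     setup, // hypothetical SetupFunc
	})
}

func setup(c *caddy.Controller) error {
	// parse the directive's tokens from c here
	return nil
}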
// ServerType contains information about a server type.
type ServerType struct {
	// Function that returns the list of directives, in
	// execution order, that are valid for this server
	// type. Directives should be one word if possible
	// and lower-cased.
	Directives func() []string

	// DefaultInput returns a default config input if none
	// is otherwise loaded. This is optional, but highly
	// recommended, otherwise a blank Caddyfile will be
	// used.
	DefaultInput func() Input

	// The function that produces a new server type context.
	// This will be called when a new Caddyfile is being
	// loaded, parsed, and executed independently of any
	// startup phases before this one. It's a way to keep
	// each set of server instances separate and to reduce
	// the amount of global state you need.
	NewContext func(inst *Instance) Context
}
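Server types register themselves the same way plugins do. A sketch loosely modeled on CoreDNS's "dns" server type; the directive list and newContext below are placeholders, not the real registration:

func init() {
	caddy.RegisterServerType("dns", caddy.ServerType{
		Directives: func() []string {
			// execution order matters; illustrative subset
			return []string{"errors", "log", "forward"}
		},
		DefaultInput: func() caddy.Input {
			return caddy.CaddyfileInput{
				Filepath:       "Corefile",
				Contents:       []byte(".:53 {\n\twhoami\n}\n"),
				ServerTypeName: "dns",
			}
		},
		NewContext: newContext, // hypothetical func(*caddy.Instance) caddy.Context
	})
}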
type Instance struct {
	// serverType is the name of the instance's server type
	serverType string

	// caddyfileInput is the input configuration text used for this process
	caddyfileInput Input

	// wg is used to wait for all servers to shut down
	wg *sync.WaitGroup

	// context is the context created for this instance,
	// used to coordinate the setting up of the server type
	context Context

	// servers is the list of servers with their listeners
	servers []ServerListener

	// these callbacks execute when certain events occur
	OnFirstStartup  []func() error // starting, not as part of a restart
	OnStartup       []func() error // starting, even as part of a restart
	OnRestart       []func() error // before restart commences
	OnRestartFailed []func() error // if restart failed
	OnShutdown      []func() error // stopping, even as part of a restart
	OnFinalShutdown []func() error // stopping, not as part of a restart

	// storing values on an instance is preferable to
	// global state because these will get garbage-
	// collected after in-process reloads when the
	// old instances are destroyed; use StorageMu
	// to access this value safely
	Storage   map[interface{}]interface{}
	StorageMu sync.RWMutex
}
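The Storage/StorageMu pair implies a locking discipline like the following minimal sketch (myKey and the nil check are illustrative assumptions):

type myKey struct{}

func saveCache(inst *caddy.Instance, v interface{}) {
	inst.StorageMu.Lock()
	defer inst.StorageMu.Unlock()
	if inst.Storage == nil {
		inst.Storage = make(map[interface{}]interface{})
	}
	inst.Storage[myKey{}] = v
}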
type Context interface {
	// Called after the Caddyfile is parsed into server
	// blocks but before the directives are executed,
	// this method gives you an opportunity to inspect
	// the server blocks and prepare for the execution
	// of directives. Return the server blocks (which
	// you may modify, if desired) and an error, if any.
	// The first argument is the name or path to the
	// configuration file (Caddyfile).
	//
	// This function can be a no-op and simply return its
	// input if there is nothing to do here.
	InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error)

	// This is what Caddy calls to make server instances.
	// By this time, all directives have been executed and,
	// presumably, the context has enough state to produce
	// server instances for Caddy to start.
	MakeServers() ([]Server, error)
}
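For illustration, a minimal pass-through implementation of this interface; myContext is hypothetical, not CoreDNS's actual dnsContext:

type myContext struct {
	configs []*Config // state accumulated while directives execute
}

func (c *myContext) InspectServerBlocks(sourceFile string, sblocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
	// nothing to prepare; hand the blocks back unchanged
	return sblocks, nil
}

func (c *myContext) MakeServers() ([]caddy.Server, error) {
	// build one caddy.Server per listen address from c.configs
	return nil, nil
}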
func executeDirectives(inst *Instance, filename string,
	directives []string, sblocks []caddyfile.ServerBlock, justValidate bool) error {
	// We loop with the directives on the outer loop so we execute
	// a directive for all server blocks before going to the next directive.
	// This is important mainly due to the parsing callbacks (below).
	for _, dir := range directives {
		for i, sb := range sblocks {
			for j, key := range sb.Keys {
				// Execute directive if it is in the server block
				if tokens, ok := sb.Tokens[dir]; ok {
					controller := &Controller{
						instance:  inst,
						Key:       key,
						Dispenser: caddyfile.NewDispenserTokens(filename, tokens),
						OncePerServerBlock: func(f func() error) error {
							var err error
							once.Do(func() {
								err = f()
							})
							return err
						},
						ServerBlockIndex:    i,
						ServerBlockKeyIndex: j,
						ServerBlockKeys:     sb.Keys,
						ServerBlockStorage:  storages[i][dir],
					}
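Illustrative only: because the directives sit on the outer loop, one directive executes across every server block before the next directive starts, which is exactly what the parsing callbacks depend on:

directives := []string{"root", "log"}
blocks := []string{"block0", "block1"}
for _, dir := range directives {
	for _, sb := range blocks {
		fmt.Println(dir, "->", sb) // root->block0, root->block1, log->block0, log->block1
	}
}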
.......
	// we must map (group) each config to a bind address
	groups, err := groupConfigsByListenAddr(h.configs)
	if err != nil {
		return nil, err
	}
	// then we create a server for each group
	var servers []caddy.Server
	for addr, group := range groups {
		// switch on addr
		switch tr, _ := parse.Transport(addr); tr {
		case transport.DNS:
			s, err := NewServer(addr, group)
			if err != nil {
				return nil, err
			}
			servers = append(servers, s)

		case transport.TLS:
			s, err := NewServerTLS(addr, group)
			if err != nil {
				return nil, err
			}
			servers = append(servers, s)

		case transport.GRPC:
			s, err := NewServergRPC(addr, group)
			if err != nil {
				return nil, err
			}
			servers = append(servers, s)

		case transport.HTTPS:
			s, err := NewServerHTTPS(addr, group)
			if err != nil {
				return nil, err
			}
			servers = append(servers, s)
		}
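The switch keys off the scheme prefix of each bind address. A hedged sketch of what parse.Transport (CoreDNS's plugin/pkg/parse) yields for typical listen addresses; a bare address defaults to the plain DNS transport:

for _, addr := range []string{".:53", "tls://.:853", "grpc://.:443", "https://.:443"} {
	tr, rest := parse.Transport(addr)
	fmt.Println(tr, rest) // "dns .:53", "tls .:853", "grpc .:443", "https .:443"
}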
type Server struct {
	Addr string // Address we listen on

	server [2]*dns.Server // 0 is a net.Listener, 1 is a net.PacketConn (a *UDPConn) in our case.
	m      sync.Mutex     // protects the servers

	zones        map[string]*Config // zones keyed by their address
	dnsWg        sync.WaitGroup     // used to wait on outstanding connections
	graceTimeout time.Duration      // the maximum duration of a graceful shutdown
	trace        trace.Trace        // the trace plugin for the server
	debug        bool               // disable recover()
	classChaos   bool               // allow non-INET class queries
}
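The two slots of the server array are indexed by small constants; in CoreDNS's server.go they look like this (worth keeping in mind for Serve and ServePacket below):

const (
	tcp = 0 // s.server[tcp]: the TCP half, driven by a net.Listener
	udp = 1 // s.server[udp]: the UDP half, driven by a net.PacketConn
)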
// Log errors that may be returned from Serve() calls,
// these errors should only be occurring in the server loop.
go func() {
	for {
		select {
		case err := <-errChan:
			if err != nil {
				if !strings.Contains(err.Error(), "use of closed network connection") {
					// this error is normal when closing the listener; see https://github.com/golang/go/issues/4373
					log.Println(err)
				}
			}
		case <-stopChan:
			return
		}
	}
}()
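For context, a hedged sketch of the producing side: Caddy starts each server's Serve/ServePacket in its own goroutine and funnels the exit error into errChan, so the loop above is the single consumer (ln and pc stand in for the server's listener and packet conn):

go func() {
	errChan <- s.Serve(ln) // ln: the server's net.Listener
}()
go func() {
	errChan <- s.ServePacket(pc) // pc: the server's net.PacketConn
}()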
// Serve starts the server with an existing listener. It blocks until the server stops.
// This implements caddy.TCPServer interface.
func (s *Server) Serve(l net.Listener) error {
	s.m.Lock()
	s.server[tcp] = &dns.Server{Listener: l, Net: "tcp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
		ctx := context.WithValue(context.Background(), Key{}, s)
		s.ServeDNS(ctx, w, r)
	})}
	s.m.Unlock()

	return s.server[tcp].ActivateAndServe()
}
// ServePacket starts the server with an existing packetconn. It blocks until the server stops.
// This implements caddy.UDPServer interface.
func (s *Server) ServePacket(p net.PacketConn) error {
	s.m.Lock()
	s.server[udp] = &dns.Server{PacketConn: p, Net: "udp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
		ctx := context.WithValue(context.Background(), Key{}, s)
		s.ServeDNS(ctx, w, r)
	})}
	s.m.Unlock()

	return s.server[udp].ActivateAndServe()
}
// ServeDNS is the entry point for every request to the address that s
// is bound to. It acts as a multiplexer for the request's zone name as
// defined in the request so that the correct zone
// (configuration and plugin stack) will handle the request.
func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {
	// The default dns.Mux checks the question section size, but we have our
	// own mux here. Check if we have a question section. If not drop them here.
	// ......

	// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.
	w = request.NewScrubWriter(r, w)
	for {
		l := len(q[off:])
		for i := 0; i < l; i++ {
			b[i] = q[off+i]
			// normalize the name for the lookup
			if b[i] >= 'A' && b[i] <= 'Z' {
				b[i] |= ('a' - 'A')
			}
		}
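		// Aside (not in the original source): the OR above is the classic
		// ASCII case trick. 'a'-'A' == 0x20, and upper- and lowercase letters
		// differ only in that bit, so 'E' (0x45) | 0x20 == 'e' (0x65); the
		// preceding range check guarantees bytes outside 'A'..'Z' are never touched.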
		if h, ok := s.zones[string(b[:l])]; ok {
			if r.Question[0].Qtype != dns.TypeDS {
				if h.FilterFunc == nil {
					rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
					if !plugin.ClientWrite(rcode) {
						errorFunc(s.Addr, w, r, rcode)
					}
					return
				}
				// FilterFunc is set, call it to see if we should use this handler.
				// This is given the full query name.
				if h.FilterFunc(q) {
					rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
					if !plugin.ClientWrite(rcode) {
						errorFunc(s.Addr, w, r, rcode)
					}
					return
				}
			}
			// The type is DS, keep the handler, but keep on searching as maybe we are serving
			// the parent as well and the DS should be routed to it - this will probably *misroute* DS
			// queries to a possibly grand parent, but there is no way for us to know at this point
			// if there is an actual delegation from grandparent -> parent -> zone.
			// In all fairness: direct DS queries should not be needed.
			dshandler = h
		}
		off, end = dns.NextLabel(q, off)
		if end {
			break
		}
	}
	if r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {
		// DS request, and we found a zone, use the handler for the query.
		rcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)
		if !plugin.ClientWrite(rcode) {
			errorFunc(s.Addr, w, r, rcode)
		}
		return
	}
	// Wildcard match, if we have found nothing try the root zone as a last resort.
	if h, ok := s.zones["."]; ok && h.pluginChain != nil {
		rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
		if !plugin.ClientWrite(rcode) {
			errorFunc(s.Addr, w, r, rcode)
		}
		return
	}
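Putting the lookup together: the outer loop strips one label at a time with dns.NextLabel, so the zone map is probed from the most specific name down to the TLD, with the root zone "." as the final fallback above. A small sketch of that walk, using the real miekg/dns helper:

qname := "www.example.org."
for off, end := 0, false; !end; off, end = dns.NextLabel(qname, off) {
	fmt.Println(qname[off:]) // www.example.org. -> example.org. -> org.
}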