forked from juju/juju
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapiserver.go
1084 lines (976 loc) · 34 KB
/
apiserver.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package apiserver
import (
"crypto/tls"
"crypto/x509"
"io"
"log"
"net"
"net/http"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/bmizerany/pat"
"github.com/juju/errors"
"github.com/juju/loggo"
"github.com/juju/pubsub"
"github.com/juju/utils"
"github.com/juju/utils/clock"
"github.com/prometheus/client_golang/prometheus"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
"gopkg.in/juju/names.v2"
"gopkg.in/macaroon-bakery.v1/httpbakery"
"gopkg.in/tomb.v1"
"github.com/juju/juju/apiserver/authentication"
"github.com/juju/juju/apiserver/common"
"github.com/juju/juju/apiserver/common/apihttp"
"github.com/juju/juju/apiserver/common/crossmodel"
"github.com/juju/juju/apiserver/facade"
"github.com/juju/juju/apiserver/logsink"
"github.com/juju/juju/apiserver/observer"
"github.com/juju/juju/apiserver/params"
"github.com/juju/juju/apiserver/websocket"
"github.com/juju/juju/resource"
"github.com/juju/juju/resource/resourceadapters"
"github.com/juju/juju/rpc"
"github.com/juju/juju/rpc/jsoncodec"
"github.com/juju/juju/state"
"gopkg.in/macaroon-bakery.v1/bakery"
)
// logger is the package-level logger for the apiserver.
var logger = loggo.GetLogger("juju.apiserver")

// defaultHTTPMethods lists the HTTP methods registered for every pat
// endpoint; see the TODO in endpoints() for why all methods are
// registered rather than only the ones each handler supports.
var defaultHTTPMethods = []string{"GET", "POST", "HEAD", "PUT", "DELETE", "OPTIONS"}
// These constants define the defaults for rate limiting incoming
// connections, logins, and the logsink endpoint.
const (
	defaultLoginRateLimit         = 10 // concurrent login operations
	defaultLoginMinPause          = 100 * time.Millisecond
	defaultLoginMaxPause          = 1 * time.Second
	defaultLoginRetryPause        = 5 * time.Second
	defaultConnMinPause           = 0 * time.Millisecond
	defaultConnMaxPause           = 5 * time.Second
	defaultConnLookbackWindow     = 1 * time.Second
	defaultConnLowerThreshold     = 1000   // connections per second
	defaultConnUpperThreshold     = 100000 // connections per second
	defaultLogSinkRateLimitBurst  = 1000
	defaultLogSinkRateLimitRefill = time.Millisecond
)
// Server holds the server side of the API.
type Server struct {
	tomb            tomb.Tomb
	clock           clock.Clock
	// pingClock is used for connection ping timing; it may be a
	// different clock from `clock` (see ServerConfig.PingClock).
	pingClock       clock.Clock
	wg              sync.WaitGroup
	statePool       *state.StatePool
	lis             net.Listener
	tag             names.Tag
	dataDir         string
	logDir          string
	limiter         utils.Limiter
	loginRetryPause time.Duration
	validator       LoginValidator
	facades         *facade.Registry
	modelUUID       string
	loginAuthCtxt   *authContext
	offerAuthCtxt   *crossmodel.AuthContext
	lastConnectionID uint64
	centralHub      *pubsub.StructuredHub
	newObserver     observer.ObserverFactory

	// connCount, totalConn and loginAttempts are accessed atomically
	// (see ConnectionCount, TotalConnections and LoginAttempts).
	connCount     int64
	totalConn     int64
	loginAttempts int64

	certChanged            <-chan params.StateServingInfo
	tlsConfig              *tls.Config
	allowModelAccess       bool
	logSinkWriter          io.WriteCloser
	logsinkRateLimitConfig logsink.RateLimitConfig
	dbloggers              dbloggers

	// mu guards the fields below it.
	mu sync.Mutex

	// publicDNSName_ holds the value that will be returned in
	// LoginResult.PublicDNSName. Currently this is set once from
	// AutocertDNSName and does not change but in the future it
	// may change when a server certificate is explicitly set,
	// hence it's here guarded by the mutex.
	publicDNSName_ string

	// cert holds the current certificate used for tls.Config.
	cert *tls.Certificate

	// certDNSNames holds the DNS names associated with cert.
	certDNSNames []string

	// registerIntrospectionHandlers is a function that will
	// call a function with (path, http.Handler) tuples. This
	// is to support registering the handlers underneath the
	// "/introspection" prefix.
	registerIntrospectionHandlers func(func(string, http.Handler))
}
// LoginValidator functions are used to decide whether login requests
// are to be allowed. The validator is called before credentials are
// checked. A non-nil error presumably rejects the login — confirm at
// the call site.
type LoginValidator func(authUser names.Tag) error
// ServerConfig holds parameters required to set up an API server.
type ServerConfig struct {
	// Clock is the clock used for timing operations; it is required
	// (see Validate).
	Clock clock.Clock
	// PingClock, if non-nil, overrides Clock for connection ping
	// timing; otherwise Clock is used (see pingClock()).
	PingClock clock.Clock
	// Cert and Key hold the server certificate and key in PEM format,
	// passed to updateCertificate at construction time.
	Cert string
	Key  string
	Tag  names.Tag
	// DataDir and LogDir are the server's data and log directories;
	// the logsink file is created under LogDir.
	DataDir string
	LogDir  string
	// Validator, if non-nil, is consulted before login credentials
	// are checked.
	Validator LoginValidator
	// Hub is the central pubsub hub; it is required (see Validate).
	Hub *pubsub.StructuredHub
	// CertChanged delivers updated state serving info when the server
	// certificate changes.
	CertChanged <-chan params.StateServingInfo

	// AutocertDNSName holds the DNS name for which
	// official TLS certificates will be obtained. If this is
	// empty, no certificates will be requested.
	AutocertDNSName string

	// AutocertURL holds the URL from which official
	// TLS certificates will be obtained. By default,
	// acme.LetsEncryptURL will be used.
	AutocertURL string

	// AllowModelAccess holds whether users will be allowed to
	// access models that they have access rights to even when
	// they don't have access to the controller.
	AllowModelAccess bool

	// NewObserver is a function which will return an observer. This
	// is used per-connection to instantiate a new observer to be
	// notified of key events during API requests.
	NewObserver observer.ObserverFactory

	// RegisterIntrospectionHandlers is a function that will
	// call a function with (path, http.Handler) tuples. This
	// is to support registering the handlers underneath the
	// "/introspection" prefix.
	RegisterIntrospectionHandlers func(func(string, http.Handler))

	// RateLimitConfig holds paramaters to control
	// aspects of rate limiting connections and logins.
	RateLimitConfig RateLimitConfig

	// LogSinkConfig holds parameters to control the API server's
	// logsink endpoint behaviour. If this is nil, the values from
	// DefaultLogSinkConfig() will be used.
	LogSinkConfig *LogSinkConfig

	// PrometheusRegisterer registers Prometheus collectors.
	PrometheusRegisterer prometheus.Registerer
}
// Validate validates the API server configuration. It requires Hub,
// Clock and NewObserver to be set, and checks the rate limit and
// (optional) logsink configurations.
func (c ServerConfig) Validate() error {
	switch {
	case c.Hub == nil:
		return errors.NotValidf("missing Hub")
	case c.Clock == nil:
		return errors.NotValidf("missing Clock")
	case c.NewObserver == nil:
		return errors.NotValidf("missing NewObserver")
	}
	if err := c.RateLimitConfig.Validate(); err != nil {
		return errors.Annotate(err, "validating rate limit configuration")
	}
	// LogSinkConfig is optional; nil means defaults will be applied.
	if c.LogSinkConfig == nil {
		return nil
	}
	if err := c.LogSinkConfig.Validate(); err != nil {
		return errors.Annotate(err, "validating logsink configuration")
	}
	return nil
}
// pingClock returns the clock to use for connection pings: the
// explicit PingClock when one was supplied, otherwise Clock.
func (c ServerConfig) pingClock() clock.Clock {
	if c.PingClock != nil {
		return c.PingClock
	}
	return c.Clock
}
// RateLimitConfig holds parameters to control
// aspects of rate limiting connections and logins.
// See DefaultRateLimitConfig for the default values and
// Validate for the permitted ranges.
type RateLimitConfig struct {
	// LoginRateLimit is the number of concurrent login operations.
	LoginRateLimit int
	// LoginMinPause and LoginMaxPause bound the pause applied to
	// logins by the limiter.
	LoginMinPause time.Duration
	LoginMaxPause time.Duration
	// LoginRetryPause is the pause before a rejected login may retry.
	LoginRetryPause time.Duration
	// ConnMinPause and ConnMaxPause bound the pause applied to
	// incoming connections by the throttling listener.
	ConnMinPause time.Duration
	ConnMaxPause time.Duration
	// ConnLookbackWindow is the window over which the connection
	// rate is measured.
	ConnLookbackWindow time.Duration
	// ConnLowerThreshold and ConnUpperThreshold are connection-rate
	// thresholds (connections per second — see the defaults).
	ConnLowerThreshold int
	ConnUpperThreshold int
}
// DefaultRateLimitConfig returns a RateLimitConfig struct with
// all attributes set to their default values.
func DefaultRateLimitConfig() RateLimitConfig {
	cfg := RateLimitConfig{
		LoginRateLimit:     defaultLoginRateLimit,
		LoginMinPause:      defaultLoginMinPause,
		LoginMaxPause:      defaultLoginMaxPause,
		LoginRetryPause:    defaultLoginRetryPause,
		ConnMinPause:       defaultConnMinPause,
		ConnMaxPause:       defaultConnMaxPause,
		ConnLookbackWindow: defaultConnLookbackWindow,
		ConnLowerThreshold: defaultConnLowerThreshold,
		ConnUpperThreshold: defaultConnUpperThreshold,
	}
	return cfg
}
// Validate validates the rate limit configuration.
// We apply arbitrary but sensible upper limits to prevent
// typos from introducing obviously bad config.
func (c RateLimitConfig) Validate() error {
	if c.LoginRateLimit <= 0 || c.LoginRateLimit > 100 {
		return errors.NotValidf("login-rate-limit %d <= 0 or > 100", c.LoginRateLimit)
	}
	if c.LoginMinPause < 0 || c.LoginMinPause > 100*time.Millisecond {
		return errors.NotValidf("login-min-pause %d < 0 or > 100ms", c.LoginMinPause)
	}
	if c.LoginMaxPause < 0 || c.LoginMaxPause > 5*time.Second {
		return errors.NotValidf("login-max-pause %d < 0 or > 5s", c.LoginMaxPause)
	}
	if c.LoginRetryPause < 0 || c.LoginRetryPause > 10*time.Second {
		return errors.NotValidf("login-retry-pause %d < 0 or > 10s", c.LoginRetryPause)
	}
	if c.ConnMinPause < 0 || c.ConnMinPause > 100*time.Millisecond {
		return errors.NotValidf("conn-min-pause %d < 0 or > 100ms", c.ConnMinPause)
	}
	if c.ConnMaxPause < 0 || c.ConnMaxPause > 10*time.Second {
		return errors.NotValidf("conn-max-pause %d < 0 or > 10s", c.ConnMaxPause)
	}
	if c.ConnLookbackWindow < 0 || c.ConnLookbackWindow > 5*time.Second {
		// Fixed: the error previously formatted c.ConnMaxPause here,
		// reporting the wrong field's value.
		return errors.NotValidf("conn-lookback-window %d < 0 or > 5s", c.ConnLookbackWindow)
	}
	return nil
}
// LogSinkConfig holds parameters to control the API server's
// logsink endpoint behaviour. See DefaultLogSinkConfig for the
// default values and Validate for the permitted ranges.
type LogSinkConfig struct {
	// DBLoggerBufferSize is the capacity of the database logger's buffer.
	DBLoggerBufferSize int

	// DBLoggerFlushInterval is the amount of time to allow a log record
	// to sit in the buffer before being flushed to the database.
	DBLoggerFlushInterval time.Duration

	// RateLimitBurst defines the number of log messages that will be let
	// through before we start rate limiting.
	RateLimitBurst int64

	// RateLimitRefill defines the rate at which log messages will be let
	// through once the initial burst amount has been depleted.
	RateLimitRefill time.Duration
}
// Validate validates the logsink endpoint configuration, rejecting
// non-positive values and (for the buffer/flush settings) values
// above sensible upper bounds.
func (cfg LogSinkConfig) Validate() error {
	switch {
	case cfg.DBLoggerBufferSize <= 0 || cfg.DBLoggerBufferSize > 1000:
		return errors.NotValidf("DBLoggerBufferSize %d <= 0 or > 1000", cfg.DBLoggerBufferSize)
	case cfg.DBLoggerFlushInterval <= 0 || cfg.DBLoggerFlushInterval > 10*time.Second:
		return errors.NotValidf("DBLoggerFlushInterval %s <= 0 or > 10 seconds", cfg.DBLoggerFlushInterval)
	case cfg.RateLimitBurst <= 0:
		return errors.NotValidf("RateLimitBurst %d <= 0", cfg.RateLimitBurst)
	case cfg.RateLimitRefill <= 0:
		return errors.NotValidf("RateLimitRefill %s <= 0", cfg.RateLimitRefill)
	}
	return nil
}
// DefaultLogSinkConfig returns a LogSinkConfig with default values.
func DefaultLogSinkConfig() LogSinkConfig {
	var cfg LogSinkConfig
	cfg.DBLoggerBufferSize = defaultDBLoggerBufferSize
	cfg.DBLoggerFlushInterval = defaultDBLoggerFlushInterval
	cfg.RateLimitBurst = defaultLogSinkRateLimitBurst
	cfg.RateLimitRefill = defaultLogSinkRateLimitRefill
	return cfg
}
// NewServer serves the given state by accepting requests on the given
// listener, using the given certificate and key (in PEM format) for
// authentication.
//
// The Server will not close the StatePool; the caller is responsible
// for closing it after the Server has been stopped.
//
// The Server will close the listener when it exits, even if returns
// an error.
func NewServer(stPool *state.StatePool, lis net.Listener, cfg ServerConfig) (*Server, error) {
	if cfg.LogSinkConfig == nil {
		defaults := DefaultLogSinkConfig()
		cfg.LogSinkConfig = &defaults
	}
	if err := cfg.Validate(); err != nil {
		return nil, errors.Trace(err)
	}
	// Important note:
	// Do not manipulate the state within NewServer as the API
	// server needs to run before mongo upgrades have happened and
	// any state manipulation may be be relying on features of the
	// database added by upgrades. Here be dragons.
	srv, err := newServer(stPool, lis, cfg)
	if err == nil {
		return srv, nil
	}
	// There is no running server around to close the listener.
	lis.Close()
	return nil, errors.Trace(err)
}
// newServer constructs the Server: it wires up login rate limiting,
// TLS and connection throttling on the listener, the login and offer
// auth contexts, the logsink file writer and (optionally) Prometheus
// metrics, then starts the main loop in a goroutine. On error the
// caller (NewServer) is responsible for closing the listener.
func newServer(stPool *state.StatePool, lis net.Listener, cfg ServerConfig) (_ *Server, err error) {
	limiter := utils.NewLimiterWithPause(
		cfg.RateLimitConfig.LoginRateLimit, cfg.RateLimitConfig.LoginMinPause,
		cfg.RateLimitConfig.LoginMaxPause, clock.WallClock)
	srv := &Server{
		clock:            cfg.Clock,
		pingClock:        cfg.pingClock(),
		lis:              lis,
		newObserver:      cfg.NewObserver,
		statePool:        stPool,
		tag:              cfg.Tag,
		dataDir:          cfg.DataDir,
		logDir:           cfg.LogDir,
		limiter:          limiter,
		loginRetryPause:  cfg.RateLimitConfig.LoginRetryPause,
		validator:        cfg.Validator,
		facades:          AllFacades(),
		centralHub:       cfg.Hub,
		certChanged:      cfg.CertChanged,
		allowModelAccess: cfg.AllowModelAccess,
		publicDNSName_:   cfg.AutocertDNSName,
		registerIntrospectionHandlers: cfg.RegisterIntrospectionHandlers,
		logsinkRateLimitConfig: logsink.RateLimitConfig{
			Refill: cfg.LogSinkConfig.RateLimitRefill,
			Burst:  cfg.LogSinkConfig.RateLimitBurst,
			Clock:  cfg.Clock,
		},
		dbloggers: dbloggers{
			clock:                 cfg.Clock,
			dbLoggerBufferSize:    cfg.LogSinkConfig.DBLoggerBufferSize,
			dbLoggerFlushInterval: cfg.LogSinkConfig.DBLoggerFlushInterval,
		},
	}
	srv.tlsConfig = srv.newTLSConfig(cfg)
	// Wrap the listener in TLS and connection-rate throttling.
	srv.lis = newThrottlingListener(
		tls.NewListener(lis, srv.tlsConfig), cfg.RateLimitConfig, clock.WallClock)
	// The auth context for authenticating logins.
	srv.loginAuthCtxt, err = newAuthContext(stPool.SystemState())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The auth context for authenticating access to application offers.
	srv.offerAuthCtxt, err = newOfferAuthcontext(stPool)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if err := srv.updateCertificate(cfg.Cert, cfg.Key); err != nil {
		return nil, errors.Annotatef(err, "cannot set initial certificate")
	}
	logSinkWriter, err := logsink.NewFileWriter(filepath.Join(srv.logDir, "logsink.log"))
	if err != nil {
		return nil, errors.Annotate(err, "creating logsink writer")
	}
	srv.logSinkWriter = logSinkWriter
	if cfg.PrometheusRegisterer != nil {
		// (Renamed from the misspelled "apiserverCollectior".)
		apiserverCollector := NewMetricsCollector(&metricAdaptor{srv})
		// Unregister any previous collector first — presumably so that
		// re-registration after a restart does not fail as a duplicate.
		cfg.PrometheusRegisterer.Unregister(apiserverCollector)
		if err := cfg.PrometheusRegisterer.Register(apiserverCollector); err != nil {
			return nil, errors.Annotate(err, "registering apiserver metrics collector")
		}
	}
	go srv.run()
	return srv, nil
}
// metricAdaptor adapts a *Server to the interface expected by
// NewMetricsCollector, delegating each metric to the server.
type metricAdaptor struct {
	srv *Server
}
// TotalConnections delegates to Server.TotalConnections.
func (a *metricAdaptor) TotalConnections() int64 {
	return a.srv.TotalConnections()
}
// ConnectionCount delegates to Server.ConnectionCount.
func (a *metricAdaptor) ConnectionCount() int64 {
	return a.srv.ConnectionCount()
}
// ConcurrentLoginAttempts delegates to Server.LoginAttempts.
func (a *metricAdaptor) ConcurrentLoginAttempts() int64 {
	return a.srv.LoginAttempts()
}
// ConnectionPauseTime reports the throttling listener's current
// pause time. The type assertion is safe because newServer always
// wraps the listener with newThrottlingListener.
func (a *metricAdaptor) ConnectionPauseTime() time.Duration {
	return a.srv.lis.(*throttlingListener).pauseTime()
}
// newTLSConfig returns the TLS configuration for the server's
// listener. Without an AutocertDNSName the local certificate is
// always served; with one, certificates are fetched via ACME
// (autocert) for qualifying SNI names, falling back to the local
// certificate on failure.
func (srv *Server) newTLSConfig(cfg ServerConfig) *tls.Config {
	tlsConfig := utils.SecureTLSConfig()
	if cfg.AutocertDNSName == "" {
		// No official DNS name, no certificate.
		tlsConfig.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert, _ := srv.localCertificate(clientHello.ServerName)
			return cert, nil
		}
		return tlsConfig
	}
	// The autocert cache is persisted in the controller's state so
	// obtained certificates survive restarts.
	m := autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		Cache:      srv.statePool.SystemState().AutocertCache(),
		HostPolicy: autocert.HostWhitelist(cfg.AutocertDNSName),
	}
	if cfg.AutocertURL != "" {
		m.Client = &acme.Client{
			DirectoryURL: cfg.AutocertURL,
		}
	}
	tlsConfig.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		logger.Infof("getting certificate for server name %q", clientHello.ServerName)
		// Get the locally created certificate and whether it's appropriate
		// for the SNI name. If not, we'll try to get an acme cert and
		// fall back to the local certificate if that fails.
		cert, shouldUse := srv.localCertificate(clientHello.ServerName)
		if shouldUse {
			return cert, nil
		}
		acmeCert, err := m.GetCertificate(clientHello)
		if err == nil {
			return acmeCert, nil
		}
		logger.Errorf("cannot get autocert certificate for %q: %v", clientHello.ServerName, err)
		return cert, nil
	}
	return tlsConfig
}
// TotalConnections returns the total number of connections ever made.
// The counter is read atomically; see apiHandler for the increment.
func (srv *Server) TotalConnections() int64 {
	return atomic.LoadInt64(&srv.totalConn)
}
// ConnectionCount returns the number of current connections.
// The counter is read atomically; see apiHandler for the updates.
func (srv *Server) ConnectionCount() int64 {
	return atomic.LoadInt64(&srv.connCount)
}
// LoginAttempts returns the number of current login attempts,
// read atomically.
func (srv *Server) LoginAttempts() int64 {
	return atomic.LoadInt64(&srv.loginAttempts)
}
// Dead returns a channel that signals when the server has exited.
func (srv *Server) Dead() <-chan struct{} {
	return srv.tomb.Dead()
}
// Stop stops the server and returns when all running requests
// have completed.
func (srv *Server) Stop() error {
	srv.tomb.Kill(nil)
	return srv.tomb.Wait()
}
// Kill implements worker.Worker.Kill: it asks the server to stop
// without waiting for it to do so.
func (srv *Server) Kill() {
	srv.tomb.Kill(nil)
}
// Wait implements worker.Worker.Wait: it blocks until the server
// has fully stopped and returns its final error.
func (srv *Server) Wait() error {
	return srv.tomb.Wait()
}
// loggoWrapper is an io.Writer() that forwards the messages to a loggo.Logger.
// Unfortunately http takes a concrete stdlib log.Logger struct, and not an
// interface, so we can't just proxy all of the log levels without inspecting
// the string content. For now, we just want to get the messages into the log
// file.
type loggoWrapper struct {
	// logger receives every written message.
	logger loggo.Logger
	// level is the fixed level at which messages are logged.
	level loggo.Level
}
// Write implements io.Writer by logging the content at the wrapper's
// configured level. It never fails and always reports the full length
// as written.
func (w *loggoWrapper) Write(content []byte) (int, error) {
	msg := string(content)
	w.logger.Logf(w.level, "%s", msg)
	return len(content), nil
}
// run is the server's main loop: it starts the background workers
// (mongo pinger, local-login expiry, cert-change and model-removal
// processing), registers all HTTP endpoints and serves them until
// the tomb is killed, then tears everything down in the deferred
// block below.
func (srv *Server) run() {
	logger.Infof("listening on %q", srv.lis.Addr())
	defer func() {
		addr := srv.lis.Addr().String() // Addr not valid after close
		err := srv.lis.Close()
		logger.Infof("closed listening socket %q with final error: %v", addr, err)
		// Break deadlocks caused by leadership BlockUntil... calls.
		srv.statePool.KillWorkers()
		srv.statePool.SystemState().KillWorkers()
		srv.wg.Wait() // wait for any outstanding requests to complete.
		srv.tomb.Done()
		srv.dbloggers.dispose()
		srv.logSinkWriter.Close()
	}()
	// Each background task is tracked in the waitgroup and kills the
	// tomb with its final error when it exits.
	srv.wg.Add(1)
	go func() {
		defer srv.wg.Done()
		srv.tomb.Kill(srv.mongoPinger())
	}()
	srv.wg.Add(1)
	go func() {
		defer srv.wg.Done()
		srv.tomb.Kill(srv.expireLocalLoginInteractions())
	}()
	srv.wg.Add(1)
	go func() {
		defer srv.wg.Done()
		srv.tomb.Kill(srv.processCertChanges())
	}()
	srv.wg.Add(1)
	go func() {
		defer srv.wg.Done()
		srv.tomb.Kill(srv.processModelRemovals())
	}()
	// for pat based handlers, they are matched in-order of being
	// registered, first match wins. So more specific ones have to be
	// registered first.
	mux := pat.New()
	for _, endpoint := range srv.endpoints() {
		registerEndpoint(endpoint, mux)
	}
	go func() {
		logger.Debugf("Starting API http server on address %q", srv.lis.Addr())
		httpSrv := &http.Server{
			Handler:   mux,
			TLSConfig: srv.tlsConfig,
			ErrorLog: log.New(&loggoWrapper{
				level:  loggo.WARNING,
				logger: logger,
			}, "", 0), // no prefix and no flags so log.Logger doesn't add extra prefixes
		}
		err := httpSrv.Serve(srv.lis)
		// Normally logging an error at debug level would be grounds for a beating,
		// however in this case the error is *expected* to be non nil, and does not
		// affect the operation of the apiserver, but for completeness log it anyway.
		logger.Debugf("API http server exited, final error was: %v", err)
	}()
	<-srv.tomb.Dying()
}
func (srv *Server) endpoints() []apihttp.Endpoint {
var endpoints []apihttp.Endpoint
add := func(pattern string, handler http.Handler) {
// TODO: We can switch from all methods to specific ones for entries
// where we only want to support specific request methods. However, our
// tests currently assert that errors come back as application/json and
// pat only does "text/plain" responses.
for _, method := range defaultHTTPMethods {
endpoints = append(endpoints, apihttp.Endpoint{
Pattern: pattern,
Method: method,
Handler: handler,
})
}
}
httpCtxt := httpContext{
srv: srv,
}
strictCtxt := httpCtxt
strictCtxt.strictValidation = true
strictCtxt.controllerModelOnly = true
mainAPIHandler := srv.trackRequests(http.HandlerFunc(srv.apiHandler))
logStreamHandler := srv.trackRequests(newLogStreamEndpointHandler(httpCtxt))
debugLogHandler := srv.trackRequests(newDebugLogDBHandler(httpCtxt))
pubsubHandler := srv.trackRequests(newPubSubHandler(httpCtxt, srv.centralHub))
// This handler is model specific even though it only ever makes sense
// for a controller because the API caller that is handed to the worker
// that is forwarding the messages between controllers is bound to the
// /model/:modeluuid namespace.
add("/model/:modeluuid/pubsub", pubsubHandler)
add("/model/:modeluuid/logstream", logStreamHandler)
add("/model/:modeluuid/log", debugLogHandler)
logSinkHandler := logsink.NewHTTPHandler(
newAgentLogWriteCloserFunc(httpCtxt, srv.logSinkWriter, &srv.dbloggers),
httpCtxt.stop(),
&srv.logsinkRateLimitConfig,
)
add("/model/:modeluuid/logsink", srv.trackRequests(logSinkHandler))
// We don't need to save the migrated logs to a logfile as well as to the DB.
logTransferHandler := logsink.NewHTTPHandler(
newMigrationLogWriteCloserFunc(httpCtxt, &srv.dbloggers),
httpCtxt.stop(),
nil, // no rate-limiting
)
add("/migrate/logtransfer", srv.trackRequests(logTransferHandler))
modelRestHandler := &modelRestHandler{
ctxt: httpCtxt,
dataDir: srv.dataDir,
stateAuthFunc: httpCtxt.stateForRequestAuthenticatedUser,
}
modelRestServer := &RestHTTPHandler{
GetHandler: modelRestHandler.ServeGet,
}
add("/model/:modeluuid/rest/1.0/:entity/:name/:attribute", modelRestServer)
modelCharmsHandler := &charmsHandler{
ctxt: httpCtxt,
dataDir: srv.dataDir,
stateAuthFunc: httpCtxt.stateForRequestAuthenticatedUser,
}
charmsServer := &CharmsHTTPHandler{
PostHandler: modelCharmsHandler.ServePost,
GetHandler: modelCharmsHandler.ServeGet,
}
add("/model/:modeluuid/charms", charmsServer)
add("/model/:modeluuid/tools",
&toolsUploadHandler{
ctxt: httpCtxt,
stateAuthFunc: httpCtxt.stateForRequestAuthenticatedUser,
},
)
add("/model/:modeluuid/applications/:application/resources/:resource", &ResourcesHandler{
StateAuthFunc: func(req *http.Request, tagKinds ...string) (ResourcesBackend, state.StatePoolReleaser, names.Tag, error) {
st, closer, entity, err := httpCtxt.stateForRequestAuthenticatedTag(req, tagKinds...)
if err != nil {
return nil, nil, nil, errors.Trace(err)
}
rst, err := st.Resources()
if err != nil {
return nil, nil, nil, errors.Trace(err)
}
return rst, closer, entity.Tag(), nil
},
})
add("/model/:modeluuid/units/:unit/resources/:resource", &UnitResourcesHandler{
NewOpener: func(req *http.Request, tagKinds ...string) (resource.Opener, state.StatePoolReleaser, error) {
st, closer, _, err := httpCtxt.stateForRequestAuthenticatedTag(req, tagKinds...)
if err != nil {
return nil, nil, errors.Trace(err)
}
tagStr := req.URL.Query().Get(":unit")
tag, err := names.ParseUnitTag(tagStr)
if err != nil {
return nil, nil, errors.Trace(err)
}
opener, err := resourceadapters.NewResourceOpener(st, tag.Id())
if err != nil {
return nil, nil, errors.Trace(err)
}
return opener, closer, nil
},
})
migrateCharmsHandler := &charmsHandler{
ctxt: httpCtxt,
dataDir: srv.dataDir,
stateAuthFunc: httpCtxt.stateForMigrationImporting,
}
add("/migrate/charms",
&CharmsHTTPHandler{
PostHandler: migrateCharmsHandler.ServePost,
GetHandler: migrateCharmsHandler.ServeUnsupported,
},
)
add("/migrate/tools",
&toolsUploadHandler{
ctxt: httpCtxt,
stateAuthFunc: httpCtxt.stateForMigrationImporting,
},
)
add("/migrate/resources",
&resourcesMigrationUploadHandler{
ctxt: httpCtxt,
stateAuthFunc: httpCtxt.stateForMigrationImporting,
},
)
add("/model/:modeluuid/tools/:version",
&toolsDownloadHandler{
ctxt: httpCtxt,
},
)
add("/model/:modeluuid/backups",
&backupHandler{
ctxt: strictCtxt,
},
)
add("/model/:modeluuid/api", mainAPIHandler)
// GUI related paths.
endpoints = append(endpoints, guiEndpoints(guiURLPathPrefix, srv.dataDir, httpCtxt)...)
add("/gui-archive", &guiArchiveHandler{
ctxt: httpCtxt,
})
add("/gui-version", &guiVersionHandler{
ctxt: httpCtxt,
})
// For backwards compatibility we register all the old paths
add("/log", debugLogHandler)
add("/charms", charmsServer)
add("/tools",
&toolsUploadHandler{
ctxt: httpCtxt,
stateAuthFunc: httpCtxt.stateForRequestAuthenticatedUser,
},
)
add("/tools/:version",
&toolsDownloadHandler{
ctxt: httpCtxt,
},
)
add("/register",
®isterUserHandler{
ctxt: httpCtxt,
},
)
add("/api", mainAPIHandler)
// Serve the API at / (only) for backward compatiblity. Note that the
// pat muxer special-cases / so that it does not serve all
// possible endpoints, but only / itself.
add("/", mainAPIHandler)
// Register the introspection endpoints.
if srv.registerIntrospectionHandlers != nil {
handle := func(subpath string, handler http.Handler) {
add(path.Join("/introspection/", subpath),
introspectionHandler{
httpCtxt,
handler,
},
)
}
srv.registerIntrospectionHandlers(handle)
}
// Add HTTP handlers for local-user macaroon authentication.
localLoginHandlers := &localLoginHandlers{srv.loginAuthCtxt, srv.statePool.SystemState()}
dischargeMux := http.NewServeMux()
httpbakery.AddDischargeHandler(
dischargeMux,
localUserIdentityLocationPath,
localLoginHandlers.authCtxt.localUserThirdPartyBakeryService,
localLoginHandlers.checkThirdPartyCaveat,
)
dischargeMux.Handle(
localUserIdentityLocationPath+"/login",
makeHandler(handleJSON(localLoginHandlers.serveLogin)),
)
dischargeMux.Handle(
localUserIdentityLocationPath+"/wait",
makeHandler(handleJSON(localLoginHandlers.serveWait)),
)
add(localUserIdentityLocationPath+"/discharge", dischargeMux)
add(localUserIdentityLocationPath+"/publickey", dischargeMux)
add(localUserIdentityLocationPath+"/login", dischargeMux)
add(localUserIdentityLocationPath+"/wait", dischargeMux)
// Add HTTP handlers for application offer macaroon authentication.
appOfferHandler := &localOfferAuthHandler{authCtx: srv.offerAuthCtxt}
appOfferDischargeMux := http.NewServeMux()
httpbakery.AddDischargeHandler(
appOfferDischargeMux,
localOfferAccessLocationPath,
// Sadly we need a type assertion since the method doesn't accept an interface.
srv.offerAuthCtxt.ThirdPartyBakeryService().(*bakery.Service),
appOfferHandler.checkThirdPartyCaveat,
)
add(localOfferAccessLocationPath+"/discharge", appOfferDischargeMux)
add(localOfferAccessLocationPath+"/publickey", appOfferDischargeMux)
return endpoints
}
// expireLocalLoginInteractions periodically expires stale local-user
// login interactions until the server's tomb starts dying.
func (srv *Server) expireLocalLoginInteractions() error {
	for {
		select {
		case <-srv.tomb.Dying():
			return tomb.ErrDying
		case <-srv.clock.After(authentication.LocalLoginInteractionTimeout):
			now := srv.loginAuthCtxt.clock.Now()
			srv.loginAuthCtxt.localUserInteractions.Expire(now)
		}
	}
}
// trackRequests wraps a http.Handler, incrementing and decrementing
// the apiserver's WaitGroup and blocking request when the apiserver
// is shutting down.
//
// Note: It is only safe to use trackRequests with API handlers which
// are interruptible (i.e. they pay attention to the apiserver tomb)
// or are guaranteed to be short-lived. If it's used with long running
// API handlers which don't watch the apiserver's tomb, apiserver
// shutdown will be blocked until the API handler returns.
func (srv *Server) trackRequests(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Care must be taken to not increment the waitgroup count
		// after the listener has closed.
		//
		// First we check to see if the tomb has not yet been killed
		// because the closure of the listener depends on the tomb being
		// killed to trigger the defer block in srv.run.
		select {
		case <-srv.tomb.Dying():
			// This request was accepted before the listener was closed
			// but after the tomb was killed. As we're in the process of
			// shutting down, do not consider this request as in progress,
			// just send a 503 and return.
			http.Error(w, "apiserver shutdown in progress", http.StatusServiceUnavailable)
		default:
			// If we get here then the tomb was not killed therefore the
			// listener is still open. It is safe to increment the
			// wg counter as wg.Wait in srv.run has not yet been called.
			srv.wg.Add(1)
			defer srv.wg.Done()
			handler.ServeHTTP(w, r)
		}
	})
}
// registerEndpoint adds the endpoint's handler to the pat mux under
// its method and pattern.
func registerEndpoint(ep apihttp.Endpoint, mux *pat.PatternServeMux) {
	mux.Add(ep.Method, ep.Pattern, ep.Handler)
	if ep.Method != "GET" {
		return
	}
	// Mirror every GET route as HEAD as well.
	mux.Add("HEAD", ep.Pattern, ep.Handler)
}
// apiHandler serves the main websocket API endpoint. It maintains the
// total/current connection counters, allocates a connection ID,
// notifies the per-connection observer, and hands the websocket
// connection to serveConn.
func (srv *Server) apiHandler(w http.ResponseWriter, req *http.Request) {
	atomic.AddInt64(&srv.totalConn, 1)
	atomic.AddInt64(&srv.connCount, 1)
	defer atomic.AddInt64(&srv.connCount, -1)

	connectionID := atomic.AddUint64(&srv.lastConnectionID, 1)
	apiObserver := srv.newObserver()
	apiObserver.Join(req, connectionID)
	defer apiObserver.Leave()

	websocket.Serve(w, req, func(conn *websocket.Conn) {
		modelUUID := req.URL.Query().Get(":modeluuid")
		logger.Tracef("got a request for model %q", modelUUID)
		if err := srv.serveConn(conn, modelUUID, apiObserver, req.Host); err != nil {
			logger.Errorf("error serving RPCs: %v", err)
		}
	})
}
// serveConn serves RPC requests on a single websocket connection.
// It resolves the model UUID, acquires the model's State from the
// pool, and serves either the admin root (on success) or an error
// root (on failure) until the connection dies or the server shuts
// down. The err == nil chaining below deliberately short-circuits
// later steps once any earlier step has failed.
func (srv *Server) serveConn(wsConn *websocket.Conn, modelUUID string, apiObserver observer.Observer, host string) error {
	codec := jsoncodec.NewWebsocket(wsConn.Conn)
	conn := rpc.NewConn(codec, apiObserver)
	// Note that we don't overwrite modelUUID here because
	// newAPIHandler treats an empty modelUUID as signifying
	// the API version used.
	resolvedModelUUID, err := validateModelUUID(validateArgs{
		statePool: srv.statePool,
		modelUUID: modelUUID,
	})
	var (
		st       *state.State
		h        *apiHandler
		releaser state.StatePoolReleaser
	)
	if err == nil {
		st, releaser, err = srv.statePool.Get(resolvedModelUUID)
	}
	if err == nil {
		defer releaser()
		h, err = newAPIHandler(srv, st, conn, modelUUID, host)
	}
	if err != nil {
		conn.ServeRoot(&errRoot{errors.Trace(err)}, serverError)
	} else {
		// Set up the admin apis used to accept logins and direct
		// requests to the relevant business facade.
		// There may be more than one since we need a new API each
		// time login changes in a non-backwards compatible way.
		adminAPIs := make(map[int]interface{})
		for apiVersion, factory := range adminAPIFactories {
			adminAPIs[apiVersion] = factory(srv, h, apiObserver)
		}
		conn.ServeRoot(newAdminRoot(h, adminAPIs), serverError)
	}
	conn.Start()
	select {
	case <-conn.Dead():
	case <-srv.tomb.Dying():
	}
	return conn.Close()
}
// mongoPinger periodically pings mongo on a copied session and
// returns a non-nil error (killing the server's tomb) as soon as a
// ping fails, or tomb.ErrDying when the server is shutting down.
func (srv *Server) mongoPinger() error {
	session := srv.statePool.SystemState().MongoSession().Copy()
	defer session.Close()
	for {
		if err := session.Ping(); err != nil {
			logger.Infof("got error pinging mongo: %v", err)
			return errors.Annotate(err, "error pinging mongo")
		}
		select {
		case <-srv.clock.After(mongoPingInterval):
		case <-srv.tomb.Dying():
			return tomb.ErrDying
		}
	}
}
// publicDNSName returns the current public hostname, taking the
// mutex that guards publicDNSName_.
func (srv *Server) publicDNSName() string {
	srv.mu.Lock()
	defer srv.mu.Unlock()
	return srv.publicDNSName_
}
// localCertificate returns the local server certificate and reports
// whether it should be used to serve a connection addressed to the
// given server name.
func (srv *Server) localCertificate(serverName string) (*tls.Certificate, bool) {
srv.mu.Lock()
defer srv.mu.Unlock()
if net.ParseIP(serverName) != nil {
// IP address connections always use the local certificate.
return srv.cert, true
}
if !strings.Contains(serverName, ".") {
// If the server name doesn't contain a period there's no
// way we can obtain a certificate for it.
// This applies to the common case where "juju-apiserver" is
// used as the server name.
return srv.cert, true
}
// Perhaps the server name is explicitly mentioned by the server certificate.
for _, name := range srv.certDNSNames {
if name == serverName {
return srv.cert, true
}
}