diff --git a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java index 297641598..6c209aba1 100644 --- a/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java +++ b/MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java @@ -132,10 +132,14 @@ default public void onTrafficRateLimits(long upstreamBytesPerSecond, long downst default public void onApplicationParameters(Object parameters) {} default public void onServerAlert(String reason, String subject, List actionURLs) {} /** - * Called when tunnel-core emits a message to be displayed to the in-proxy operator. - * @param message The operator message received. + * Called when tunnel-core reports that a selected in-proxy mode -- + * including running a proxy; or running a client in personal pairing + * mode -- cannot function without an app upgrade. The receiver + * should alert the user to upgrade the app and/or disable the + * unsupported mode(s). This callback is followed by a tunnel-core + * shutdown. */ - default void onInproxyOperatorMessage(String message) {} + default void onInproxyMustUpgrade() {} /** * Called when tunnel-core reports proxy usage statistics. * By default onInproxyProxyActivity is disabled. Enable it by setting @@ -1115,8 +1119,8 @@ private void handlePsiphonNotice(String noticeJSON) { notice.getJSONObject("data").getString("reason"), notice.getJSONObject("data").getString("subject"), actionURLsList); - } else if (noticeType.equals("InproxyOperatorMessage")) { - mHostService.onInproxyOperatorMessage( notice.getJSONObject("data").getString("message")); + } else if (noticeType.equals("InproxyMustUpgrade")) { + mHostService.onInproxyMustUpgrade(); } else if (noticeType.equals("InproxyProxyActivity")) { JSONObject data = notice.getJSONObject("data"); mHostService.onInproxyProxyActivity( diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h index 9106665db..46a7ce581 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h @@ -300,10 +300,13 @@ WWAN or vice versa or VPN state changed - (void)onApplicationParameters:(NSDictionary * _Nonnull)parameters; /*! - Called when tunnel-core emits a message to be displayed to the in-proxy operator - @param message The operator message received. +Called when tunnel-core reports that a selected in-proxy mode -- including +running a proxy; or running a client in personal pairing mode -- cannot +function without an app upgrade. The receiver should alert the user to +upgrade the app and/or disable the unsupported mode(s). This callback is +followed by a tunnel-core shutdown. */ -- (void)onInproxyOperatorMessage:(NSString * _Nonnull)message; +- (void)onInproxyMustUpgrade; /*! 
Called when tunnel-core reports in-proxy usage statistics diff --git a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m index 268e0fc6d..7dc62e1cf 100644 --- a/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m +++ b/MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m @@ -1174,15 +1174,10 @@ - (void)handlePsiphonNotice:(NSString * _Nonnull)noticeJSON { }); } } - else if ([noticeType isEqualToString:@"InproxyOperatorMessage"]) { - id message = [notice valueForKeyPath:@"data.message"]; - if (![message isKindOfClass:[NSString class]]) { - [self logMessage:[NSString stringWithFormat: @"InproxyOperatorMessage notice missing data.message: %@", noticeJSON]]; - return; - } - if ([self.tunneledAppDelegate respondsToSelector:@selector(onInproxyOperatorMessage:)]) { + else if ([noticeType isEqualToString:@"InproxyMustUpgrade"]) { + if ([self.tunneledAppDelegate respondsToSelector:@selector(onInproxyMustUpgrade)]) { dispatch_sync(self->callbackQueue, ^{ - [self.tunneledAppDelegate onInproxyOperatorMessage:message]; + [self.tunneledAppDelegate onInproxyMustUpgrade]; }); } } diff --git a/go.mod b/go.mod index bb184c77a..461096257 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,6 @@ require ( github.com/florianl/go-nfqueue v1.1.1-0.20200829120558-a2f196e98ab0 github.com/flynn/noise v1.0.1-0.20220214164934-d803f5c4b0f4 github.com/fxamacker/cbor/v2 v2.5.0 - github.com/gammazero/deque v0.2.1 github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/google/gopacket v1.1.19 diff --git a/go.sum b/go.sum index 2407fb76d..904d0648c 100644 --- a/go.sum +++ b/go.sum @@ -75,8 +75,6 @@ github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3 github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= -github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/gaukas/godicttls v0.0.4 h1:NlRaXb3J6hAnTmWdsEKb9bcSBD6BvcIjdGdeb0zfXbk= github.com/gaukas/godicttls v0.0.4/go.mod h1:l6EenT4TLWgTdwslVb4sEMOCf7Bv0JAK67deKr9/NCI= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= diff --git a/psiphon/common/inproxy/api.go b/psiphon/common/inproxy/api.go index be66626d1..ec9317d3e 100644 --- a/psiphon/common/inproxy/api.go +++ b/psiphon/common/inproxy/api.go @@ -243,14 +243,17 @@ type WebRTCSessionDescription struct { // to relay client traffic with. The broker validates that the dial address // corresponds to a valid Psiphon server. // -// OperatorMessageJSON is an optional message bundle to be forwarded to the -// user interface for display to the user; for example, to alert the proxy -// operator of configuration issue; the JSON schema is not defined here. +// MustUpgrade is an optional flag that is set by the broker, based on the +// submitted ProxyProtocolVersion, when the proxy app must be upgraded in +// order to function properly. Potential must-upgrade scenarios include +// changes to the personal pairing broker rendezvous algorithm, where no +// protocol backwards compatibility accommodations can ensure a rendezvous +// and match. 
When MustUpgrade is set, NoMatch is implied. type ProxyAnnounceResponse struct { - OperatorMessageJSON string `cbor:"1,keyasint,omitempty"` TacticsPayload []byte `cbor:"2,keyasint,omitempty"` Limited bool `cbor:"3,keyasint,omitempty"` NoMatch bool `cbor:"4,keyasint,omitempty"` + MustUpgrade bool `cbor:"13,keyasint,omitempty"` ConnectionID ID `cbor:"5,keyasint,omitempty"` ClientProxyProtocolVersion int32 `cbor:"6,keyasint,omitempty"` ClientOfferSDP WebRTCSessionDescription `cbor:"7,keyasint,omitempty"` @@ -322,9 +325,17 @@ type DataChannelTrafficShapingParameters struct { // the broker using ClientRelayedPacketRequests and continues to relay using // ClientRelayedPacketRequests until complete. ConnectionID identifies this // connection and its relayed BrokerServerReport. +// +// MustUpgrade is an optional flag that is set by the broker, based on the +// submitted ProxyProtocolVersion, when the client app must be upgraded in +// order to function properly. Potential must-upgrade scenarios include +// changes to the personal pairing broker rendezvous algorithm, where no +// protocol backwards compatibility accommodations can ensure a rendezvous +// and match. When MustUpgrade is set, NoMatch is implied. type ClientOfferResponse struct { Limited bool `cbor:"1,keyasint,omitempty"` NoMatch bool `cbor:"2,keyasint,omitempty"` + MustUpgrade bool `cbor:"7,keyasint,omitempty"` ConnectionID ID `cbor:"3,keyasint,omitempty"` SelectedProxyProtocolVersion int32 `cbor:"4,keyasint,omitempty"` ProxyAnswerSDP WebRTCSessionDescription `cbor:"5,keyasint,omitempty"` @@ -544,8 +555,12 @@ func (request *ProxyAnnounceRequest) ValidateAndGetParametersAndLogFields( formatter common.APIParameterLogFieldFormatter, geoIPData common.GeoIPData) (common.APIParameters, common.LogFields, error) { - if len(request.PersonalCompartmentIDs) > maxCompartmentIDs { - return nil, nil, errors.Tracef("invalid compartment IDs length: %d", len(request.PersonalCompartmentIDs)) + // A proxy may specify at most 1 personal compartment ID. This is + // currently a limitation of the multi-queue implementation; see comment + // in announcementMultiQueue.enqueue. + if len(request.PersonalCompartmentIDs) > 1 { + return nil, nil, errors.Tracef( + "invalid compartment IDs length: %d", len(request.PersonalCompartmentIDs)) } if request.Metrics == nil { @@ -587,13 +602,31 @@ func (request *ClientOfferRequest) ValidateAndGetLogFields( "invalid compartment IDs length: %d", len(request.PersonalCompartmentIDs)) } + if len(request.CommonCompartmentIDs) > 0 && len(request.PersonalCompartmentIDs) > 0 { + return nil, nil, errors.TraceNew("multiple compartment ID types") + } + // The client offer SDP may contain no ICE candidates. errorOnNoCandidates := false + // The client offer SDP may include RFC 1918/4193 private IP addresses in + // personal pairing mode. filterSDPAddresses should not filter out + // private IP addresses based on the broker's local interfaces; this + // filtering occurs on the proxy that receives the SDP. + allowPrivateIPAddressCandidates := + len(request.PersonalCompartmentIDs) > 0 && + len(request.CommonCompartmentIDs) == 0 + filterPrivateIPAddressCandidates := false + // Client offer SDP candidate addresses must match the country and ASN of // the client. Don't facilitate connections to arbitrary destinations. 
filteredSDP, sdpMetrics, err := filterSDPAddresses( - []byte(request.ClientOfferSDP.SDP), errorOnNoCandidates, lookupGeoIP, geoIPData) + []byte(request.ClientOfferSDP.SDP), + errorOnNoCandidates, + lookupGeoIP, + geoIPData, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) if err != nil { return nil, nil, errors.Trace(err) } @@ -637,6 +670,7 @@ func (request *ClientOfferRequest) ValidateAndGetLogFields( logFields["has_personal_compartment_ids"] = hasPersonalCompartmentIDs logFields["ice_candidate_types"] = request.ICECandidateTypes logFields["has_IPv6"] = sdpMetrics.hasIPv6 + logFields["has_private_IP"] = sdpMetrics.hasPrivateIP logFields["filtered_ice_candidates"] = sdpMetrics.filteredICECandidates return filteredSDP, logFields, nil @@ -679,15 +713,28 @@ func (request *ProxyAnswerRequest) ValidateAndGetLogFields( lookupGeoIP LookupGeoIP, baseAPIParameterValidator common.APIParameterValidator, formatter common.APIParameterLogFieldFormatter, - geoIPData common.GeoIPData) ([]byte, common.LogFields, error) { + geoIPData common.GeoIPData, + proxyAnnouncementHasPersonalCompartmentIDs bool) ([]byte, common.LogFields, error) { // The proxy answer SDP must contain at least one ICE candidate. errorOnNoCandidates := true + // The proxy answer SDP may include RFC 1918/4193 private IP addresses in + // personal pairing mode. filterSDPAddresses should not filter out + // private IP addresses based on the broker's local interfaces; this + // filtering occurs on the client that receives the SDP. + allowPrivateIPAddressCandidates := proxyAnnouncementHasPersonalCompartmentIDs + filterPrivateIPAddressCandidates := false + // Proxy answer SDP candidate addresses must match the country and ASN of // the proxy. Don't facilitate connections to arbitrary destinations. filteredSDP, sdpMetrics, err := filterSDPAddresses( - []byte(request.ProxyAnswerSDP.SDP), errorOnNoCandidates, lookupGeoIP, geoIPData) + []byte(request.ProxyAnswerSDP.SDP), + errorOnNoCandidates, + lookupGeoIP, + geoIPData, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) if err != nil { return nil, nil, errors.Trace(err) } @@ -712,6 +759,7 @@ func (request *ProxyAnswerRequest) ValidateAndGetLogFields( logFields["connection_id"] = request.ConnectionID logFields["ice_candidate_types"] = request.ICECandidateTypes logFields["has_IPv6"] = sdpMetrics.hasIPv6 + logFields["has_private_IP"] = sdpMetrics.hasPrivateIP logFields["filtered_ice_candidates"] = sdpMetrics.filteredICECandidates logFields["answer_error"] = request.AnswerError diff --git a/psiphon/common/inproxy/broker.go b/psiphon/common/inproxy/broker.go index 722cd963b..6bbd0f45c 100644 --- a/psiphon/common/inproxy/broker.go +++ b/psiphon/common/inproxy/broker.go @@ -587,6 +587,9 @@ func (b *Broker) handleProxyAnnounce( defer cancelFunc() extendTransportTimeout(timeout) + // Note that matcher.Announce assumes a monotonically increasing + // announceCtx.Deadline input for each successive call. + clientOffer, matchMetrics, err = b.matcher.Announce( announceCtx, proxyIP, @@ -768,6 +771,9 @@ func (b *Broker) handleClientOffer( // processSDPAddresses), so all invalid candidates are removed and the // remaining SDP is used. Filtered candidate information is logged in // logFields. + // + // In personal pairing mode, RFC 1918/4193 private IP addresses are + // permitted in exchanged SDPs and not filtered out. 
var filteredSDP []byte filteredSDP, logFields, err = offerRequest.ValidateAndGetLogFields( @@ -1020,13 +1026,23 @@ func (b *Broker) handleProxyAnswer( // processSDPAddresses), so all invalid candidates are removed and the // remaining SDP is used. Filtered candidate information is logged in // logFields. + // + // In personal pairing mode, RFC 1918/4193 private IP addresses are + // permitted in exchanged SDPs and not filtered out. + + hasPersonalCompartmentIDs, err := b.matcher.AnnouncementHasPersonalCompartmentIDs( + initiatorID, answerRequest.ConnectionID) + if err != nil { + return nil, errors.Trace(err) + } var filteredSDP []byte filteredSDP, logFields, err = answerRequest.ValidateAndGetLogFields( b.config.LookupGeoIP, b.config.APIParameterValidator, b.config.APIParameterLogFieldFormatter, - geoIPData) + geoIPData, + hasPersonalCompartmentIDs) if err != nil { return nil, errors.Trace(err) } diff --git a/psiphon/common/inproxy/client.go b/psiphon/common/inproxy/client.go index 7be44dcd7..dcc148a03 100644 --- a/psiphon/common/inproxy/client.go +++ b/psiphon/common/inproxy/client.go @@ -107,6 +107,16 @@ type ClientConfig struct { // with the caller invoking ServerEntryFields.RemoveUnsignedFields to // prune local, unnsigned fields before sending. PackedDestinationServerEntry []byte + + // MustUpgrade is a callback that is invoked when a MustUpgrade flag is + // received from the broker. When MustUpgrade is received, the client + // should be stopped and the user should be prompted to upgrade before + // restarting the client. + // + // In Psiphon, MustUpgrade may be ignored when not running in + // in-proxy-only personal pairing mode, as other tunnel protocols remain + // available. + MustUpgrade func() } // DialClient establishes an in-proxy connection for relaying traffic to the @@ -314,6 +324,13 @@ func dialClientWebRTCConn( ctx context.Context, config *ClientConfig) (retResult *clientWebRTCDialResult, retRetry bool, retErr error) { + brokerCoordinator := config.BrokerClient.GetBrokerDialCoordinator() + personalCompartmentIDs := brokerCoordinator.PersonalCompartmentIDs() + + // In personal pairing mode, RFC 1918/4193 private IP addresses are + // included in SDPs. 
+ hasPersonalCompartmentIDs := len(personalCompartmentIDs) > 0 + // Initialize the WebRTC offer doTLSRandomization := config.WebRTCDialCoordinator.DoDTLSRandomization() @@ -329,7 +346,8 @@ func dialClientWebRTCConn( DoDTLSRandomization: doTLSRandomization, TrafficShapingParameters: trafficShapingParameters, ReliableTransport: config.ReliableTransport, - }) + }, + hasPersonalCompartmentIDs) if err != nil { return nil, true, errors.Trace(err) } @@ -342,8 +360,6 @@ func dialClientWebRTCConn( // Send the ClientOffer request to the broker - brokerCoordinator := config.BrokerClient.GetBrokerDialCoordinator() - packedBaseParams, err := protocol.EncodePackedAPIParameters(config.BaseAPIParameters) if err != nil { return nil, false, errors.Trace(err) @@ -366,7 +382,7 @@ func dialClientWebRTCConn( PortMappingTypes: config.WebRTCDialCoordinator.PortMappingTypes(), }, CommonCompartmentIDs: brokerCoordinator.CommonCompartmentIDs(), - PersonalCompartmentIDs: brokerCoordinator.PersonalCompartmentIDs(), + PersonalCompartmentIDs: personalCompartmentIDs, ClientOfferSDP: SDP, ICECandidateTypes: SDPMetrics.iceCandidateTypes, ClientRootObfuscationSecret: clientRootObfuscationSecret, @@ -380,8 +396,8 @@ func dialClientWebRTCConn( return nil, false, errors.Trace(err) } - // No retry when rate/entry limited; do retry on no-match, as a match may - // soon appear. + // No retry when rate/entry limited or must upgrade; do retry on no-match, + // as a match may soon appear. if offerResponse.Limited { return nil, false, errors.TraceNew("limited") @@ -390,6 +406,13 @@ func dialClientWebRTCConn( return nil, true, errors.TraceNew("no proxy match") + } else if offerResponse.MustUpgrade { + + if config.MustUpgrade != nil { + config.MustUpgrade() + } + + return nil, false, errors.TraceNew("must upgrade") } if offerResponse.SelectedProxyProtocolVersion != ProxyProtocolVersion1 { @@ -402,7 +425,8 @@ func dialClientWebRTCConn( // Establish the WebRTC DataChannel connection - err = webRTCConn.SetRemoteSDP(offerResponse.ProxyAnswerSDP) + err = webRTCConn.SetRemoteSDP( + offerResponse.ProxyAnswerSDP, hasPersonalCompartmentIDs) if err != nil { return nil, true, errors.Trace(err) } diff --git a/psiphon/common/inproxy/inproxy_disabled.go b/psiphon/common/inproxy/inproxy_disabled.go index 71f62f3cf..f3518bbe0 100644 --- a/psiphon/common/inproxy/inproxy_disabled.go +++ b/psiphon/common/inproxy/inproxy_disabled.go @@ -68,7 +68,10 @@ type webRTCConfig struct { ReliableTransport bool } -func (conn *webRTCConn) SetRemoteSDP(peerSDP WebRTCSessionDescription) error { +func (conn *webRTCConn) SetRemoteSDP( + peerSDP WebRTCSessionDescription, + hasPersonalCompartmentIDs bool) error { + return errors.Trace(errNotEnabled) } @@ -121,12 +124,14 @@ func (conn *webRTCConn) GetMetrics() common.LogFields { type webRTCSDPMetrics struct { iceCandidateTypes []ICECandidateType hasIPv6 bool + hasPrivateIP bool filteredICECandidates []string } func newWebRTCConnWithOffer( ctx context.Context, - config *webRTCConfig) ( + config *webRTCConfig, + hasPersonalCompartmentIDs bool) ( *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { return nil, WebRTCSessionDescription{}, nil, errors.Trace(errNotEnabled) } @@ -134,7 +139,8 @@ func newWebRTCConnWithOffer( func newWebRTCConnWithAnswer( ctx context.Context, config *webRTCConfig, - peerSDP WebRTCSessionDescription) ( + peerSDP WebRTCSessionDescription, + hasPersonalCompartmentIDs bool) ( *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { return nil, WebRTCSessionDescription{}, nil, 
errors.Trace(errNotEnabled) @@ -144,7 +150,9 @@ func filterSDPAddresses( encodedSDP []byte, errorOnNoCandidates bool, lookupGeoIP LookupGeoIP, - expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) { + expectedGeoIPData common.GeoIPData, + allowPrivateIPAddressCandidates bool, + filterPrivateIPAddressCandidates bool) ([]byte, *webRTCSDPMetrics, error) { return nil, nil, errors.Trace(errNotEnabled) } diff --git a/psiphon/common/inproxy/inproxy_test.go b/psiphon/common/inproxy/inproxy_test.go index c5ec42fe2..58721b053 100644 --- a/psiphon/common/inproxy/inproxy_test.go +++ b/psiphon/common/inproxy/inproxy_test.go @@ -451,7 +451,7 @@ func runTestInproxy() error { for { time.Sleep(100 * time.Millisecond) broker.matcher.announcementQueueMutex.Lock() - n := broker.matcher.announcementQueue.Len() + n := broker.matcher.announcementQueue.getLen() broker.matcher.announcementQueueMutex.Unlock() if n >= numProxies { break diff --git a/psiphon/common/inproxy/matcher.go b/psiphon/common/inproxy/matcher.go index c903c6762..abb14b05e 100644 --- a/psiphon/common/inproxy/matcher.go +++ b/psiphon/common/inproxy/matcher.go @@ -19,6 +19,7 @@ package inproxy import ( + "container/list" "context" std_errors "errors" "net" @@ -29,7 +30,6 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors" lrucache "github.com/cognusion/go-cache-lru" - "github.com/gammazero/deque" "golang.org/x/time/rate" ) @@ -40,6 +40,7 @@ const ( matcherOfferQueueMaxSize = 5000000 matcherPendingAnswersTTL = 30 * time.Second matcherPendingAnswersMaxSize = 100000 + matcherMaxPreferredNATProbe = 100 matcherRateLimiterReapHistoryFrequencySeconds = 300 matcherRateLimiterMaxCacheEntries = 1000000 @@ -53,12 +54,10 @@ const ( // as they are closest to timing out. // // The client and proxy must supply matching personal or common compartment -// IDs. Personal compartment matching is preferred. Common compartments are -// managed by Psiphon and can be obtained via a tactics parameter or via an -// OSL embedding. -// -// A client may opt form personal-only matching by not supplying any common -// compartment IDs. +// IDs. Common compartments are managed by Psiphon and can be obtained via a +// tactics parameter or via an OSL embedding. Each proxy announcement or +// client offer may specify only one compartment ID type, either common or +// personal. // // Matching prefers to pair proxies and clients in a way that maximizes total // possible matches. For a client or proxy with less-limited NAT traversal, a @@ -86,25 +85,21 @@ type Matcher struct { // TODO: replace queue and counts with an indexed, in-memory database? 
- announcementQueueMutex sync.Mutex - announcementQueue *deque.Deque[*announcementEntry] - announcementQueueEntryCountByIP map[string]int - announcementQueueRateLimiters *lrucache.Cache - announcementLimitEntryCount int - announcementRateLimitQuantity int - announcementRateLimitInterval time.Duration - announcementNonlimitedProxyIDs map[ID]struct{} - announcementsPersonalCompartmentalizedCount int - announcementsUnlimitedNATCount int - announcementsPartiallyLimitedNATCount int - announcementsStrictlyLimitedNATCount int + announcementQueueMutex sync.Mutex + announcementQueue *announcementMultiQueue + announcementQueueEntryCountByIP map[string]int + announcementQueueRateLimiters *lrucache.Cache + announcementLimitEntryCount int + announcementRateLimitQuantity int + announcementRateLimitInterval time.Duration + announcementNonlimitedProxyIDs map[ID]struct{} // The offer queue is also implicitly sorted by offer age. Both an offer // and announcement queue are required since either announcements or // offers can arrive while there are no available pairings. offerQueueMutex sync.Mutex - offerQueue *deque.Deque[*offerEntry] + offerQueue *list.List offerQueueEntryCountByIP map[string]int offerQueueRateLimiters *lrucache.Cache offerLimitEntryCount int @@ -166,12 +161,6 @@ func (p *MatchProperties) IsPreferredNATMatch( peerMatchProperties.EffectiveNATType()) } -// IsPersonalCompartmentalized indicates whether the candidate has personal -// compartment IDs. -func (p *MatchProperties) IsPersonalCompartmentalized() bool { - return len(p.PersonalCompartmentIDs) > 0 -} - // MatchAnnouncement is a proxy announcement to be queued for matching. type MatchAnnouncement struct { Properties MatchProperties @@ -233,6 +222,10 @@ type announcementEntry struct { announcement *MatchAnnouncement offerChan chan *MatchOffer matchMetrics atomic.Value + + // queueReference is initialized by addAnnouncementEntry, and used to + // efficiently dequeue the entry. + queueReference announcementQueueReference } func (announcementEntry *announcementEntry) getMatchMetrics() *MatchMetrics { @@ -248,6 +241,10 @@ type offerEntry struct { offer *MatchOffer answerChan chan *answerInfo matchMetrics atomic.Value + + // queueReference is initialized by addOfferEntry, and used to efficiently + // dequeue the entry. + queueReference *list.Element } func (offerEntry *offerEntry) getMatchMetrics() *MatchMetrics { @@ -294,14 +291,14 @@ func NewMatcher(config *MatcherConfig) *Matcher { waitGroup: new(sync.WaitGroup), - announcementQueue: deque.New[*announcementEntry](), + announcementQueue: newAnnouncementMultiQueue(), announcementQueueEntryCountByIP: make(map[string]int), announcementQueueRateLimiters: lrucache.NewWithLRU( 0, time.Duration(matcherRateLimiterReapHistoryFrequencySeconds)*time.Second, matcherRateLimiterMaxCacheEntries), - offerQueue: deque.New[*offerEntry](), + offerQueue: list.New(), offerQueueEntryCountByIP: make(map[string]int), offerQueueRateLimiters: lrucache.NewWithLRU( 0, @@ -406,6 +403,10 @@ func (m *Matcher) Stop() { // with a returned offer or ctx is done. The caller must not mutate the // announcement or its properties after calling Announce. // +// Announce assumes that the ctx.Deadline for each call is monotonically +// increasing and that the deadline can be used as part of selecting the next +// nearest-to-expire announcement. +// // The offer is sent to the proxy by the broker, and then the proxy sends its // answer back to the broker, which calls Answer with that value. 
// @@ -416,6 +417,20 @@ func (m *Matcher) Announce( proxyIP string, proxyAnnouncement *MatchAnnouncement) (*MatchOffer, *MatchMetrics, error) { + // An announcement must specify exactly one compartment ID, of one type, + // common or personal. The limit of one is currently a limitation of the + // multi-queue implementation; see comment in + // announcementMultiQueue.enqueue. + compartmentIDs := proxyAnnouncement.Properties.CommonCompartmentIDs + if len(compartmentIDs) == 0 { + compartmentIDs = proxyAnnouncement.Properties.PersonalCompartmentIDs + } else if len(proxyAnnouncement.Properties.PersonalCompartmentIDs) > 0 { + return nil, nil, errors.TraceNew("unexpected multiple compartment ID types") + } + if len(compartmentIDs) != 1 { + return nil, nil, errors.TraceNew("unexpected compartment ID count") + } + announcementEntry := &announcementEntry{ ctx: ctx, limitIP: getRateLimitIP(proxyIP), @@ -434,7 +449,7 @@ func (m *Matcher) Announce( select { case <-ctx.Done(): - m.removeAnnouncementEntry(announcementEntry) + m.removeAnnouncementEntry(true, announcementEntry) return nil, announcementEntry.getMatchMetrics(), errors.Trace(ctx.Err()) case clientOffer = <-announcementEntry.offerChan: @@ -458,6 +473,18 @@ func (m *Matcher) Offer( clientIP string, clientOffer *MatchOffer) (*MatchAnswer, *MatchAnnouncement, *MatchMetrics, error) { + // An offer must specify at least one compartment ID, and may only specify + // one type, common or personal, of compartment IDs. + compartmentIDs := clientOffer.Properties.CommonCompartmentIDs + if len(compartmentIDs) == 0 { + compartmentIDs = clientOffer.Properties.PersonalCompartmentIDs + } else if len(clientOffer.Properties.PersonalCompartmentIDs) > 0 { + return nil, nil, nil, errors.TraceNew("unexpected multiple compartment ID types") + } + if len(compartmentIDs) < 1 { + return nil, nil, nil, errors.TraceNew("unexpected missing compartment IDs") + } + offerEntry := &offerEntry{ ctx: ctx, limitIP: getRateLimitIP(clientIP), @@ -476,7 +503,7 @@ func (m *Matcher) Offer( select { case <-ctx.Done(): - m.removeOfferEntry(offerEntry) + m.removeOfferEntry(true, offerEntry) // TODO: also remove any pendingAnswers entry? The entry TTL is set to // the Offer ctx, the client request, timeout, so it will eventually @@ -511,6 +538,31 @@ func (m *Matcher) Offer( nil } +// AnnouncementHasPersonalCompartmentIDs looks for a pending answer for an +// announcement identified by the specified proxy ID and connection ID and +// returns whether the announcement has personal compartment IDs, indicating +// personal pairing mode. +// +// If no pending answer is found, an error is returned. +func (m *Matcher) AnnouncementHasPersonalCompartmentIDs( + proxyID ID, connectionID ID) (bool, error) { + + key := m.pendingAnswerKey(proxyID, connectionID) + pendingAnswerValue, ok := m.pendingAnswers.Get(key) + if !ok { + // The input IDs don't correspond to a pending answer, or the client + // is no longer awaiting the response. + return false, errors.TraceNew("no pending answer") + } + + pendingAnswer := pendingAnswerValue.(*pendingAnswer) + + hasPersonalCompartmentIDs := len( + pendingAnswer.announcement.Properties.PersonalCompartmentIDs) > 0 + + return hasPersonalCompartmentIDs, nil +} + // Answer delivers an answer from the proxy for a previously matched offer. // The ProxyID and ConnectionID must correspond to the original announcement. // The caller must not mutate the answer after calling Answer. 
Answer does @@ -524,8 +576,9 @@ func (m *Matcher) Answer( key := m.pendingAnswerKey(proxyAnswer.ProxyID, proxyAnswer.ConnectionID) pendingAnswerValue, ok := m.pendingAnswers.Get(key) if !ok { - // The client is no longer awaiting the response. - return errors.TraceNew("no client") + // The input IDs don't correspond to a pending answer, or the client + // is no longer awaiting the response. + return errors.TraceNew("no pending answer") } m.pendingAnswers.Delete(key) @@ -589,44 +642,45 @@ func (m *Matcher) matchAllOffers() { // TODO: consider matching one offer, then releasing the locks to allow // more announcements to be enqueued, then continuing to match. - i := 0 - end := m.offerQueue.Len() + nextOffer := m.offerQueue.Front() + offerIndex := -1 + + for nextOffer != nil && m.announcementQueue.getLen() > 0 { + + offerIndex += 1 - for i < end && m.announcementQueue.Len() > 0 { + // nextOffer.Next must be invoked before any removeOfferEntry since + // container/list.remove clears list.Element.next. + offer := nextOffer + nextOffer = nextOffer.Next() - offerEntry := m.offerQueue.At(i) + offerEntry := offer.Value.(*offerEntry) // Skip and remove this offer if its deadline has already passed. // There is no signal to the awaiting Offer function, as it will exit // based on the same ctx. if offerEntry.ctx.Err() != nil { - m.removeOfferEntryByIndex(i) - end -= 1 + m.removeOfferEntry(false, offerEntry) continue } - j, ok := m.matchOffer(offerEntry) - if !ok { - - // No match, so leave this offer in place in the queue and move to - // the next. - - i++ + announcementEntry, announcementMatchIndex := m.matchOffer(offerEntry) + if announcementEntry == nil { continue } - // Get the matched announcement entry. - - announcementEntry := m.announcementQueue.At(j) - // Record match metrics. + // The index metrics predate the announcement multi-queue; now, with + // the multi-queue, announcement_index is how many announce entries + // were inspected before matching. + matchMetrics := &MatchMetrics{ - OfferMatchIndex: i, + OfferMatchIndex: offerIndex, OfferQueueSize: m.offerQueue.Len(), - AnnouncementMatchIndex: j, - AnnouncementQueueSize: m.announcementQueue.Len(), + AnnouncementMatchIndex: announcementMatchIndex, + AnnouncementQueueSize: m.announcementQueue.getLen(), } offerEntry.matchMetrics.Store(matchMetrics) @@ -639,6 +693,8 @@ func (m *Matcher) matchAllOffers() { // entry is set to the matched Offer call's ctx, as the answer is // only useful as long as the client is still waiting. + m.removeAnnouncementEntry(false, announcementEntry) + expiry := lrucache.DefaultExpiration deadline, ok := offerEntry.ctx.Deadline() if ok { @@ -659,24 +715,22 @@ func (m *Matcher) matchAllOffers() { announcementEntry.offerChan <- offerEntry.offer - m.removeAnnouncementEntryByIndex(j) - // Remove the matched offer from the queue and match the next offer, // now first in the queue. - m.removeOfferEntryByIndex(i) - - end -= 1 + m.removeOfferEntry(false, offerEntry) } } -func (m *Matcher) matchOffer(offerEntry *offerEntry) (int, bool) { +func (m *Matcher) matchOffer(offerEntry *offerEntry) (*announcementEntry, int) { // Assumes the caller has the queue mutexed locked. - // Check each announcement in turn, and select a match. There is an - // implicit preference for older proxy announcements, sooner to timeout, - // at the front of the queue. + // Check each candidate announcement in turn, and select a match. 
There is + // an implicit preference for older proxy announcements, sooner to + // timeout, at the front of the enqueued announcements. + // announcementMultiQueue.startMatching skips to the first matching + // compartment ID(s). // // Limitation: since this logic matches each enqueued client in turn, it will // only make the optimal NAT match for the oldest enqueued client vs. all @@ -692,56 +746,58 @@ func (m *Matcher) matchOffer(offerEntry *offerEntry) (int, bool) { offerProperties := &offerEntry.offer.Properties + // Assumes the caller checks that offer specifies either personal + // compartment IDs or common compartment IDs, but not both. + isCommonCompartments := false + compartmentIDs := offerProperties.PersonalCompartmentIDs + if len(compartmentIDs) == 0 { + isCommonCompartments = true + compartmentIDs = offerProperties.CommonCompartmentIDs + } + if len(compartmentIDs) == 0 { + return nil, -1 + } + + matchIterator := m.announcementQueue.startMatching( + isCommonCompartments, compartmentIDs) + // Use the NAT traversal type counters to check if there's any preferred // NAT match for this offer in the announcement queue. When there is, we // will search beyond the first announcement. + unlimitedNATCount, partiallyLimitedNATCount, strictlyLimitedNATCount := + matchIterator.getNATCounts() + existsPreferredNATMatch := offerProperties.ExistsPreferredNATMatch( - m.announcementsUnlimitedNATCount > 0, - m.announcementsPartiallyLimitedNATCount > 0, - m.announcementsStrictlyLimitedNATCount > 0) + unlimitedNATCount > 0, + partiallyLimitedNATCount > 0, + strictlyLimitedNATCount > 0) - bestMatch := -1 + var bestMatch *announcementEntry + bestMatchIndex := -1 bestMatchNAT := false - bestMatchCompartment := false - - end := m.announcementQueue.Len() - // TODO: add queue indexing to facilitate skipping ahead to a matching - // personal compartment ID, if any, when personal-only matching is - // required. Personal matching may often require near-full queue scans - // when looking for a match. Common compartment matching may also benefit - // from indexing, although with a handful of common compartment IDs more - // or less uniformly distributed, frequent long scans are not expected in - // practise. + candidateIndex := -1 + for { - for i := 0; i < end; i++ { + announcementEntry := matchIterator.getNext() + if announcementEntry == nil { + break + } - announcementEntry := m.announcementQueue.At(i) + candidateIndex += 1 // Skip and remove this announcement if its deadline has already // passed. There is no signal to the awaiting Announce function, as // it will exit based on the same ctx. if announcementEntry.ctx.Err() != nil { - m.removeAnnouncementEntryByIndex(i) - end -= 1 + m.removeAnnouncementEntry(false, announcementEntry) continue } announcementProperties := &announcementEntry.announcement.Properties - // There must be a compartment match. If there is a personal - // compartment match, this match will be preferred. - - matchCommonCompartment := HaveCommonIDs( - announcementProperties.CommonCompartmentIDs, offerProperties.CommonCompartmentIDs) - matchPersonalCompartment := HaveCommonIDs( - announcementProperties.PersonalCompartmentIDs, offerProperties.PersonalCompartmentIDs) - if !matchCommonCompartment && !matchPersonalCompartment { - continue - } - // Disallow matching the same country and ASN, except for personal // compartment ID matches. // @@ -749,7 +805,7 @@ func (m *Matcher) matchOffer(offerEntry *offerEntry) (int, bool) { // have no circumvention benefit. 
For personal matching, the user may // wish to hop their their own or their friend's proxy regardless. - if !matchPersonalCompartment && + if isCommonCompartments && !GetAllowCommonASNMatching() && (offerProperties.GeoIPData.Country == announcementProperties.GeoIPData.Country && @@ -766,49 +822,29 @@ func (m *Matcher) matchOffer(offerEntry *offerEntry) (int, bool) { matchNAT := offerProperties.IsPreferredNATMatch(announcementProperties) // At this point, the candidate is a match. Determine if this is a new - // best match. + // best match, either if there was no previous match, or this is a + // better NAT match. - if bestMatch == -1 { + if bestMatch == nil || (!bestMatchNAT && matchNAT) { - // This is a match, and there was no previous match, so it becomes - // the provisional best match. - - bestMatch = i + bestMatch = announcementEntry + bestMatchIndex = candidateIndex bestMatchNAT = matchNAT - bestMatchCompartment = matchPersonalCompartment - - } else if !bestMatchNAT && matchNAT { - - // If there was a previous best match which was not a preferred - // NAT match, this becomes the new best match. The preferred NAT - // match is prioritized over personal compartment matching. - - bestMatch = i - bestMatchNAT = true - bestMatchCompartment = matchPersonalCompartment - - } else if !bestMatchCompartment && matchPersonalCompartment && (!bestMatchNAT || matchNAT) { - - // If there was a previous best match which was not a personal - // compartment match, and as long as this match doesn't undo a - // better NAT match, this becomes the new best match. - bestMatch = i - bestMatchNAT = matchNAT - bestMatchCompartment = true } - // Stop as soon as we have the best possible match. + // Stop as soon as we have the best possible match, or have reached + // the probe limit for preferred NAT matches. + + if bestMatch != nil && (bestMatchNAT || + !existsPreferredNATMatch || + candidateIndex-bestMatchIndex >= matcherMaxPreferredNATProbe) { - if (bestMatchNAT || !existsPreferredNATMatch) && - (matchPersonalCompartment || - m.announcementsPersonalCompartmentalizedCount == 0 || - len(offerProperties.PersonalCompartmentIDs) == 0) { break } } - return bestMatch, bestMatch != -1 + return bestMatch, bestMatchIndex } // MatcherLimitError is the error type returned by Announce or Offer when the @@ -903,7 +939,7 @@ func (m *Matcher) addAnnouncementEntry(announcementEntry *announcementEntry) err defer m.announcementQueueMutex.Unlock() // Ensure the queue doesn't grow larger than the max size. - if m.announcementQueue.Len() >= matcherAnnouncementQueueMaxSize { + if m.announcementQueue.getLen() >= matcherAnnouncementQueueMaxSize { return errors.TraceNew("queue full") } @@ -916,11 +952,20 @@ func (m *Matcher) addAnnouncementEntry(announcementEntry *announcementEntry) err return errors.Trace(err) } - m.announcementQueue.PushBack(announcementEntry) + // announcementEntry.queueReference should be uninitialized. + // announcementMultiQueue.enqueue sets queueReference to be used for + // efficient dequeuing. 
- m.announcementQueueEntryCountByIP[announcementEntry.limitIP] += 1 + if announcementEntry.queueReference.entry != nil { + return errors.TraceNew("unexpected queue reference") + } + + err = m.announcementQueue.enqueue(announcementEntry) + if err != nil { + return errors.Trace(err) + } - m.adjustAnnouncementCounts(announcementEntry, 1) + m.announcementQueueEntryCountByIP[announcementEntry.limitIP] += 1 select { case m.matchSignal <- struct{}{}: @@ -930,20 +975,27 @@ func (m *Matcher) addAnnouncementEntry(announcementEntry *announcementEntry) err return nil } -func (m *Matcher) removeAnnouncementEntry(announcementEntry *announcementEntry) { +func (m *Matcher) removeAnnouncementEntry(aborting bool, announcementEntry *announcementEntry) { - m.announcementQueueMutex.Lock() - defer m.announcementQueueMutex.Unlock() + // In the aborting case, the queue isn't already locked. Otherwise, assume + // it is locked. + if aborting { + m.announcementQueueMutex.Lock() + defer m.announcementQueueMutex.Unlock() + } - found := false - for i := 0; i < m.announcementQueue.Len(); i++ { - if m.announcementQueue.At(i) == announcementEntry { - m.removeAnnouncementEntryByIndex(i) - found = true - break + found := announcementEntry.queueReference.dequeue() + + if found { + // Adjust entry counts by peer IP, used to enforce + // matcherAnnouncementQueueMaxEntriesPerIP. + m.announcementQueueEntryCountByIP[announcementEntry.limitIP] -= 1 + if m.announcementQueueEntryCountByIP[announcementEntry.limitIP] == 0 { + delete(m.announcementQueueEntryCountByIP, announcementEntry.limitIP) } } - if !found { + + if aborting && !found { // The Announce call is aborting and taking its entry back out of the // queue. If the entry is not found in the queue, then a concurrent @@ -964,45 +1016,6 @@ func (m *Matcher) removeAnnouncementEntry(announcementEntry *announcementEntry) } } -func (m *Matcher) removeAnnouncementEntryByIndex(i int) { - - // Assumes s.announcementQueueMutex lock is held. - - announcementEntry := m.announcementQueue.At(i) - - // This should be only direct call to Remove, as following adjustments - // must always be made when removing. - m.announcementQueue.Remove(i) - - // Adjust entry counts by peer IP, used to enforce - // matcherAnnouncementQueueMaxEntriesPerIP. - m.announcementQueueEntryCountByIP[announcementEntry.limitIP] -= 1 - if m.announcementQueueEntryCountByIP[announcementEntry.limitIP] == 0 { - delete(m.announcementQueueEntryCountByIP, announcementEntry.limitIP) - } - - m.adjustAnnouncementCounts(announcementEntry, -1) -} - -func (m *Matcher) adjustAnnouncementCounts( - announcementEntry *announcementEntry, delta int) { - - // Assumes s.announcementQueueMutex lock is held. - - if announcementEntry.announcement.Properties.IsPersonalCompartmentalized() { - m.announcementsPersonalCompartmentalizedCount += delta - } - - switch announcementEntry.announcement.Properties.EffectiveNATType().Traversal() { - case NATTraversalUnlimited: - m.announcementsUnlimitedNATCount += delta - case NATTraversalPartiallyLimited: - m.announcementsPartiallyLimitedNATCount += delta - case NATTraversalStrictlyLimited: - m.announcementsStrictlyLimitedNATCount += delta - } -} - func (m *Matcher) addOfferEntry(offerEntry *offerEntry) error { m.offerQueueMutex.Lock() @@ -1022,7 +1035,14 @@ func (m *Matcher) addOfferEntry(offerEntry *offerEntry) error { return errors.Trace(err) } - m.offerQueue.PushBack(offerEntry) + // offerEntry.queueReference should be uninitialized and is set here to be + // used for efficient dequeuing. 
+ + if offerEntry.queueReference != nil { + return errors.TraceNew("unexpected queue reference") + } + + offerEntry.queueReference = m.offerQueue.PushBack(offerEntry) m.offerQueueEntryCountByIP[offerEntry.limitIP] += 1 @@ -1034,28 +1054,22 @@ func (m *Matcher) addOfferEntry(offerEntry *offerEntry) error { return nil } -func (m *Matcher) removeOfferEntry(offerEntry *offerEntry) { - - m.offerQueueMutex.Lock() - defer m.offerQueueMutex.Unlock() +func (m *Matcher) removeOfferEntry(aborting bool, offerEntry *offerEntry) { - for i := 0; i < m.offerQueue.Len(); i++ { - if m.offerQueue.At(i) == offerEntry { - m.removeOfferEntryByIndex(i) - break - } + // In the aborting case, the queue isn't already locked. Otherwise, assume + // it is locked. + if aborting { + m.offerQueueMutex.Lock() + defer m.offerQueueMutex.Unlock() } -} -func (m *Matcher) removeOfferEntryByIndex(i int) { - - // Assumes s.offerQueueMutex lock is held. + if offerEntry.queueReference == nil { + return + } - offerEntry := m.offerQueue.At(i) + m.offerQueue.Remove(offerEntry.queueReference) - // This should be only direct call to Remove, as following adjustments - // must always be made when removing. - m.offerQueue.Remove(i) + offerEntry.queueReference = nil // Adjust entry counts by peer IP, used to enforce // matcherOfferQueueMaxEntriesPerIP. @@ -1087,3 +1101,260 @@ func getRateLimitIP(strIP string) string { // or /56, so rate limit by /56. return IP.Mask(net.CIDRMask(56, 128)).String() } + +// announcementMultiQueue is a set of announcement queues, one per common or +// personal compartment ID, providing efficient iteration over announcements +// matching a specified list of compartment IDs. announcementMultiQueue and +// its underlying data structures are not safe for concurrent access. +type announcementMultiQueue struct { + commonCompartmentQueues map[ID]*announcementCompartmentQueue + personalCompartmentQueues map[ID]*announcementCompartmentQueue + totalEntries int +} + +// announcementCompartmentQueue is a single compartment queue within an +// announcementMultiQueue. The queue is implemented using a doubly-linked +// list, which provides efficient insert and mid-queue dequeue operations. +// The announcementCompartmentQueue also records NAT type stats for enqueued +// announcements, which are used, when matching, to determine when better NAT +// matches may be possible. +type announcementCompartmentQueue struct { + entries *list.List + unlimitedNATCount int + partiallyLimitedNATCount int + strictlyLimitedNATCount int +} + +// announcementMatchIterator represents the state of an iteration over a +// subset of announcementMultiQueue compartment queues. Concurrent +// announcementMatchIterators are not supported. +type announcementMatchIterator struct { + multiQueue *announcementMultiQueue + isCommonCompartments bool + compartmentQueues []*announcementCompartmentQueue + compartmentIDs []ID + nextEntries []*list.Element +} + +// announcementQueueReference represents the queue position for a given +// announcement entry, and provides an efficient dequeue operation. 
+type announcementQueueReference struct { + multiQueue *announcementMultiQueue + compartmentQueue *announcementCompartmentQueue + entry *list.Element +} + +func newAnnouncementMultiQueue() *announcementMultiQueue { + return &announcementMultiQueue{ + commonCompartmentQueues: make(map[ID]*announcementCompartmentQueue), + personalCompartmentQueues: make(map[ID]*announcementCompartmentQueue), + } +} + +func (q *announcementMultiQueue) getLen() int { + return q.totalEntries +} + +func (q *announcementMultiQueue) enqueue(announcementEntry *announcementEntry) error { + + // Assumes announcementEntry not already enqueued. + + // Limitation: only one compartment ID, either common or personal, is + // supported per announcement entry. In the common compartment case, the + // broker currently assigns only one common compartment ID per proxy + // announcement. In the personal compartment case, there is currently no + // use case for allowing a proxy to announce under multiple personal + // compartment IDs. + // + // To overcome this limitation, the dequeue operation would need to be + // able to remove an announcement entry from multiple + // announcementCompartmentQueues. + + commonCompartmentIDs := announcementEntry.announcement.Properties.CommonCompartmentIDs + personalCompartmentIDs := announcementEntry.announcement.Properties.PersonalCompartmentIDs + + if len(commonCompartmentIDs)+len(personalCompartmentIDs) != 1 { + return errors.TraceNew("announcement must specify exactly one compartment ID") + } + + var compartmentID ID + var compartmentQueues map[ID]*announcementCompartmentQueue + if len(commonCompartmentIDs) > 0 { + compartmentID = commonCompartmentIDs[0] + compartmentQueues = q.commonCompartmentQueues + } else { + compartmentID = personalCompartmentIDs[0] + compartmentQueues = q.personalCompartmentQueues + } + + compartmentQueue, ok := compartmentQueues[compartmentID] + if !ok { + compartmentQueue = &announcementCompartmentQueue{ + entries: list.New(), + } + compartmentQueues[compartmentID] = compartmentQueue + } + + entry := compartmentQueue.entries.PushBack(announcementEntry) + + // Update the NAT type counts which are used to determine if a better NAT + // match may be made by inspecting more announcement queue entries. + + switch announcementEntry.announcement.Properties.EffectiveNATType().Traversal() { + case NATTraversalUnlimited: + compartmentQueue.unlimitedNATCount += 1 + case NATTraversalPartiallyLimited: + compartmentQueue.partiallyLimitedNATCount += 1 + case NATTraversalStrictlyLimited: + compartmentQueue.strictlyLimitedNATCount += 1 + } + + q.totalEntries += 1 + + announcementEntry.queueReference = announcementQueueReference{ + multiQueue: q, + compartmentQueue: compartmentQueue, + entry: entry, + } + + return nil +} + +// dequeue returns false if the item is already dequeued. +func (r announcementQueueReference) dequeue() bool { + + if r.entry == nil { + // Already dequeued. + return false + } + + announcementEntry := r.entry.Value.(*announcementEntry) + + // Reverse the NAT type counts. + switch announcementEntry.announcement.Properties.EffectiveNATType().Traversal() { + case NATTraversalUnlimited: + r.compartmentQueue.unlimitedNATCount -= 1 + case NATTraversalPartiallyLimited: + r.compartmentQueue.partiallyLimitedNATCount -= 1 + case NATTraversalStrictlyLimited: + r.compartmentQueue.strictlyLimitedNATCount -= 1 + } + + r.compartmentQueue.entries.Remove(r.entry) + + r.multiQueue.totalEntries -= 1 + + // Mark as dequeued. 
+ r.entry = nil + + return true +} + +func (q *announcementMultiQueue) startMatching( + isCommonCompartments bool, + compartmentIDs []ID) *announcementMatchIterator { + + iter := &announcementMatchIterator{ + multiQueue: q, + isCommonCompartments: isCommonCompartments, + } + + // Find the matching compartment queues and initialize iteration over + // those queues. Building the set of matching queues is a linear time + // operation, bounded by the length of compartmentIDs (no more than + // maxCompartmentIDs, as enforced in + // ClientOfferRequest.ValidateAndGetLogFields). + + compartmentQueues := q.commonCompartmentQueues + if !isCommonCompartments { + compartmentQueues = q.personalCompartmentQueues + } + + for _, ID := range compartmentIDs { + if compartmentQueue, ok := compartmentQueues[ID]; ok { + iter.compartmentQueues = append(iter.compartmentQueues, compartmentQueue) + iter.compartmentIDs = append(iter.compartmentIDs, ID) + iter.nextEntries = append(iter.nextEntries, compartmentQueue.entries.Front()) + } + } + + return iter +} + +func (iter *announcementMatchIterator) getNATCounts() (int, int, int) { + + // Return the count of NAT types across all matchable compartment queues. + // + // A potential future enhancement would be to provide per-queue NAT counts + // or NAT type indexing in order to quickly find preferred NAT matches. + + unlimitedNATCount := 0 + partiallyLimitedNATCount := 0 + strictlyLimitedNATCount := 0 + + for _, compartmentQueue := range iter.compartmentQueues { + unlimitedNATCount += compartmentQueue.unlimitedNATCount + partiallyLimitedNATCount += compartmentQueue.partiallyLimitedNATCount + strictlyLimitedNATCount += compartmentQueue.strictlyLimitedNATCount + } + + return unlimitedNATCount, partiallyLimitedNATCount, strictlyLimitedNATCount +} + +// getNext returns the next announcement entry candidate in +// compartment queue FIFO order, selecting the queue with the oldest head +// item. +// +// The caller should invoke announcementEntry.queueReference.dequeue when the +// candidate is selected. dequeue may be called on any getNext return value +// without disrupting the iteration state; however, +// announcementEntry.queueReference.dequeue calls for arbitrary queue entries +// are not supported during iteration. Iteration and dequeue should all be +// performed with a lock over the entire announcementMultiQueue, and with +// only one concurrent announcementMatchIterator. +func (iter *announcementMatchIterator) getNext() *announcementEntry { + + // Assumes announcements are enqueued in announcementEntry.ctx.Deadline + // order. + + // Select the oldest item, by deadline, from all the candidate queue head + // items. This operation is linear in the number of matching compartment + // ID queues, which is currently bounded by the length of the matching + // compartment IDs (no more than maxCompartmentIDs, as enforced in + // ClientOfferRequest.ValidateAndGetLogFields). + // + // A potential future enhancement is to add more iterator state to track + // which queue has the next oldest deadline to select on the following + // getNext call. 
+ + var selectedCandidate *announcementEntry + selectedIndex := -1 + + for i := 0; i < len(iter.compartmentQueues); i++ { + if iter.nextEntries[i] == nil { + continue + } + if selectedCandidate == nil { + selectedCandidate = iter.nextEntries[i].Value.(*announcementEntry) + selectedIndex = i + } else { + candidate := iter.nextEntries[i].Value.(*announcementEntry) + deadline, deadlineOk := candidate.ctx.Deadline() + selectedDeadline, selectedDeadlineOk := selectedCandidate.ctx.Deadline() + if deadlineOk && selectedDeadlineOk && deadline.Before(selectedDeadline) { + selectedCandidate = candidate + selectedIndex = i + } + } + } + + // Advance the selected queue to the next element. This must be done + // before any dequeue call, since container/list.remove clears + // list.Element.next. + if selectedIndex != -1 { + iter.nextEntries[selectedIndex] = iter.nextEntries[selectedIndex].Next() + } + + return selectedCandidate +} diff --git a/psiphon/common/inproxy/matcher_test.go b/psiphon/common/inproxy/matcher_test.go index 9c0182c1f..7abe7bd16 100644 --- a/psiphon/common/inproxy/matcher_test.go +++ b/psiphon/common/inproxy/matcher_test.go @@ -22,6 +22,7 @@ package inproxy import ( "context" "fmt" + "runtime/debug" "strings" "sync" "testing" @@ -173,13 +174,17 @@ func runTestMatcher() error { proxyResultChan := make(chan error) - go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 1*time.Microsecond, nil, true) + matchProperties := &MatchProperties{ + CommonCompartmentIDs: []ID{makeID()}, + } + + go proxyFunc(proxyResultChan, proxyIP, matchProperties, 1*time.Microsecond, nil, true) err = <-proxyResultChan if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { return errors.Tracef("unexpected result: %v", err) } - if m.announcementQueue.Len() != 0 { + if m.announcementQueue.getLen() != 0 { return errors.TraceNew("unexpected queue size") } @@ -191,16 +196,16 @@ func runTestMatcher() error { maxEntriesProxyResultChan := make(chan error, maxEntries) // fill the queue with max entries for one IP; the first one will timeout sooner - go proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + go proxyFunc(maxEntriesProxyResultChan, proxyIP, matchProperties, 10*time.Millisecond, nil, true) for i := 0; i < maxEntries-1; i++ { - go proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 100*time.Millisecond, nil, true) + go proxyFunc(maxEntriesProxyResultChan, proxyIP, matchProperties, 100*time.Millisecond, nil, true) } // await goroutines filling queue for { time.Sleep(10 * time.Microsecond) m.announcementQueueMutex.Lock() - queueLen := m.announcementQueue.Len() + queueLen := m.announcementQueue.getLen() m.announcementQueueMutex.Unlock() if queueLen == maxEntries { break @@ -208,7 +213,7 @@ func runTestMatcher() error { } // the next enqueue should fail with "max entries" - go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + go proxyFunc(proxyResultChan, proxyIP, matchProperties, 10*time.Millisecond, nil, true) err = <-proxyResultChan if err == nil || !strings.HasSuffix(err.Error(), "max entries for IP") { return errors.Tracef("unexpected result: %v", err) @@ -221,7 +226,7 @@ func runTestMatcher() error { } // now another enqueue succeeds as expected - go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + go proxyFunc(proxyResultChan, proxyIP, matchProperties, 10*time.Millisecond, nil, true) err = <-proxyResultChan if err == nil || 
!strings.HasSuffix(err.Error(), "context deadline exceeded") { return errors.Tracef("unexpected result: %v", err) @@ -239,7 +244,7 @@ func runTestMatcher() error { clientResultChan := make(chan error) - go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 1*time.Microsecond) + go clientFunc(clientResultChan, clientIP, matchProperties, 1*time.Microsecond) err = <-clientResultChan if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { @@ -257,9 +262,9 @@ func runTestMatcher() error { maxEntriesClientResultChan := make(chan error, maxEntries) // fill the queue with max entries for one IP; the first one will timeout sooner - go clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + go clientFunc(maxEntriesClientResultChan, clientIP, matchProperties, 10*time.Millisecond) for i := 0; i < maxEntries-1; i++ { - go clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 100*time.Millisecond) + go clientFunc(maxEntriesClientResultChan, clientIP, matchProperties, 100*time.Millisecond) } // await goroutines filling queue @@ -275,7 +280,7 @@ func runTestMatcher() error { } // enqueue should fail with "max entries" - go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + go clientFunc(clientResultChan, clientIP, matchProperties, 10*time.Millisecond) err = <-clientResultChan if err == nil || !strings.HasSuffix(err.Error(), "max entries for IP") { return errors.Tracef("unexpected result: %v", err) @@ -288,7 +293,7 @@ func runTestMatcher() error { } // now another enqueue succeeds as expected - go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + go clientFunc(clientResultChan, clientIP, matchProperties, 10*time.Millisecond) err = <-clientResultChan if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { return errors.Tracef("unexpected result: %v", err) @@ -318,7 +323,7 @@ func runTestMatcher() error { waitGroup.Add(1) go func() { defer waitGroup.Done() - proxyFunc(maxEntriesProxyResultChan, proxyIP, &MatchProperties{}, 1*time.Microsecond, nil, true) + proxyFunc(maxEntriesProxyResultChan, proxyIP, matchProperties, 1*time.Microsecond, nil, true) }() } @@ -328,7 +333,7 @@ func runTestMatcher() error { waitGroup.Wait() // the next enqueue should fail with "rate exceeded" - go proxyFunc(proxyResultChan, proxyIP, &MatchProperties{}, 10*time.Millisecond, nil, true) + go proxyFunc(proxyResultChan, proxyIP, matchProperties, 10*time.Millisecond, nil, true) err = <-proxyResultChan if err == nil || !strings.HasSuffix(err.Error(), "rate exceeded for IP") { return errors.Tracef("unexpected result: %v", err) @@ -344,14 +349,14 @@ func runTestMatcher() error { waitGroup.Add(1) go func() { defer waitGroup.Done() - clientFunc(maxEntriesClientResultChan, clientIP, &MatchProperties{}, 1*time.Microsecond) + clientFunc(maxEntriesClientResultChan, clientIP, matchProperties, 1*time.Microsecond) }() } waitGroup.Wait() // enqueue should fail with "rate exceeded" - go clientFunc(clientResultChan, clientIP, &MatchProperties{}, 10*time.Millisecond) + go clientFunc(clientResultChan, clientIP, matchProperties, 10*time.Millisecond) err = <-clientResultChan if err == nil || !strings.HasSuffix(err.Error(), "rate exceeded for IP") { return errors.Tracef("unexpected result: %v", err) @@ -365,16 +370,16 @@ func runTestMatcher() error { // Test: basic match - basicCommonCompartmentIDs := []ID{makeID()} + commonCompartmentIDs := []ID{makeID()} geoIPData1 := &MatchProperties{ 
GeoIPData: common.GeoIPData{Country: "C1", ASN: "A1"}, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } geoIPData2 := &MatchProperties{ GeoIPData: common.GeoIPData{Country: "C2", ASN: "A2"}, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } go proxyFunc(proxyResultChan, proxyIP, geoIPData1, 10*time.Millisecond, nil, true) @@ -420,66 +425,73 @@ func runTestMatcher() error { close(waitBeforeAnswer) err = <-proxyResultChan - if err == nil || !strings.HasSuffix(err.Error(), "no client") { + if err == nil || !strings.HasSuffix(err.Error(), "no pending answer") { return errors.Tracef("unexpected result: %v", err) } // Test: no compartment match compartment1 := &MatchProperties{ - GeoIPData: geoIPData1.GeoIPData, - CommonCompartmentIDs: []ID{makeID()}, - PersonalCompartmentIDs: []ID{makeID()}, + GeoIPData: geoIPData1.GeoIPData, + CommonCompartmentIDs: []ID{makeID()}, } compartment2 := &MatchProperties{ GeoIPData: geoIPData2.GeoIPData, - CommonCompartmentIDs: []ID{makeID()}, PersonalCompartmentIDs: []ID{makeID()}, } - go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) - go clientFunc(clientResultChan, clientIP, compartment2, 10*time.Millisecond) + compartment3 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + CommonCompartmentIDs: []ID{makeID()}, + } - err = <-proxyResultChan - if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { - return errors.Tracef("unexpected result: %v", err) + compartment4 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + PersonalCompartmentIDs: []ID{makeID()}, } - err = <-clientResultChan + proxy1ResultChan := make(chan error) + proxy2ResultChan := make(chan error) + client1ResultChan := make(chan error) + client2ResultChan := make(chan error) + + go proxyFunc(proxy1ResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) + go proxyFunc(proxy2ResultChan, proxyIP, compartment2, 10*time.Millisecond, nil, true) + go clientFunc(client1ResultChan, clientIP, compartment3, 10*time.Millisecond) + go clientFunc(client2ResultChan, clientIP, compartment4, 10*time.Millisecond) + + err = <-proxy1ResultChan if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { return errors.Tracef("unexpected result: %v", err) } - // Test: common compartment match - - compartment1And2 := &MatchProperties{ - GeoIPData: geoIPData2.GeoIPData, - CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0], compartment2.CommonCompartmentIDs[0]}, + err = <-proxy2ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) } - go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) - go clientFunc(clientResultChan, clientIP, compartment1And2, 10*time.Millisecond) - - err = <-proxyResultChan - if err != nil { - return errors.Trace(err) + err = <-client1ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) } - err = <-clientResultChan - if err != nil { - return errors.Trace(err) + err = <-client2ResultChan + if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { + return errors.Tracef("unexpected result: %v", err) } - // Test: personal compartment match + // Test: common compartment match - compartment1And2 = &MatchProperties{ - GeoIPData: geoIPData2.GeoIPData, - 
PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0], compartment2.PersonalCompartmentIDs[0]}, + compartment1And3 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + CommonCompartmentIDs: []ID{ + compartment1.CommonCompartmentIDs[0], + compartment3.CommonCompartmentIDs[0]}, } go proxyFunc(proxyResultChan, proxyIP, compartment1, 10*time.Millisecond, nil, true) - go clientFunc(clientResultChan, clientIP, compartment1And2, 10*time.Millisecond) + go clientFunc(clientResultChan, clientIP, compartment1And3, 10*time.Millisecond) err = <-proxyResultChan if err != nil { @@ -491,47 +503,24 @@ func runTestMatcher() error { return errors.Trace(err) } - // Test: personal compartment preferred match - - compartment1Common := &MatchProperties{ - GeoIPData: geoIPData1.GeoIPData, - CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0]}, - } - - compartment1Personal := &MatchProperties{ - GeoIPData: geoIPData1.GeoIPData, - PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0]}, - } + // Test: personal compartment match - compartment1CommonAndPersonal := &MatchProperties{ - GeoIPData: geoIPData2.GeoIPData, - CommonCompartmentIDs: []ID{compartment1.CommonCompartmentIDs[0]}, - PersonalCompartmentIDs: []ID{compartment1.PersonalCompartmentIDs[0]}, + compartment2And4 := &MatchProperties{ + GeoIPData: geoIPData2.GeoIPData, + PersonalCompartmentIDs: []ID{ + compartment2.PersonalCompartmentIDs[0], + compartment4.PersonalCompartmentIDs[0]}, } - client1ResultChan := make(chan error) - client2ResultChan := make(chan error) - - proxy1ResultChan := make(chan error) - proxy2ResultChan := make(chan error) + go proxyFunc(proxyResultChan, proxyIP, compartment2, 10*time.Millisecond, nil, true) + go clientFunc(clientResultChan, clientIP, compartment2And4, 10*time.Millisecond) - go proxyFunc(proxy1ResultChan, proxyIP, compartment1Common, 10*time.Millisecond, nil, true) - go proxyFunc(proxy2ResultChan, proxyIP, compartment1Personal, 10*time.Millisecond, nil, true) - time.Sleep(5 * time.Millisecond) // Hack to ensure both proxies are enqueued - go clientFunc(client1ResultChan, clientIP, compartment1CommonAndPersonal, 10*time.Millisecond) - - err = <-proxy1ResultChan - if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { - return errors.Tracef("unexpected result: %v", err) - } - - // proxy2 should match since it has the preferred personal compartment ID - err = <-proxy2ResultChan + err = <-proxyResultChan if err != nil { return errors.Trace(err) } - err = <-client1ResultChan + err = <-clientResultChan if err != nil { return errors.Trace(err) } @@ -556,31 +545,31 @@ func runTestMatcher() error { client1Properties := &MatchProperties{ GeoIPData: common.GeoIPData{Country: "C1", ASN: "A1"}, NATType: NATTypeFullCone, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } client2Properties := &MatchProperties{ GeoIPData: common.GeoIPData{Country: "C2", ASN: "A2"}, NATType: NATTypeSymmetric, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } proxy1Properties := &MatchProperties{ GeoIPData: common.GeoIPData{Country: "C3", ASN: "A3"}, NATType: NATTypeNone, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } proxy2Properties := &MatchProperties{ GeoIPData: common.GeoIPData{Country: "C4", ASN: "A4"}, NATType: NATTypeSymmetric, - CommonCompartmentIDs: basicCommonCompartmentIDs, + CommonCompartmentIDs: commonCompartmentIDs, } go 
proxyFunc(proxy1ResultChan, proxyIP, proxy1Properties, 10*time.Millisecond, nil, true) go proxyFunc(proxy2ResultChan, proxyIP, proxy2Properties, 10*time.Millisecond, nil, true) time.Sleep(5 * time.Millisecond) // Hack to ensure both proxies are enqueued - go clientFunc(client1ResultChan, clientIP, client1Properties, 10*time.Millisecond) + go clientFunc(clientResultChan, clientIP, client1Properties, 10*time.Millisecond) err = <-proxy1ResultChan if err == nil || !strings.HasSuffix(err.Error(), "context deadline exceeded") { @@ -593,7 +582,7 @@ func runTestMatcher() error { return errors.Trace(err) } - err = <-client1ResultChan + err = <-clientResultChan if err != nil { return errors.Trace(err) } @@ -607,9 +596,9 @@ func runTestMatcher() error { // client is enqueued first, and the test is currently of limited utility. go clientFunc(client2ResultChan, clientIP, client2Properties, 20*time.Millisecond) - time.Sleep(5 * time.Millisecond) // Hack to client is enqueued + time.Sleep(5 * time.Millisecond) // Hack to ensure client is enqueued go clientFunc(client1ResultChan, clientIP, client1Properties, 20*time.Millisecond) - time.Sleep(5 * time.Millisecond) // Hack to client is enqueued + time.Sleep(5 * time.Millisecond) // Hack to ensure client is enqueued go proxyFunc(proxy1ResultChan, proxyIP, proxy1Properties, 20*time.Millisecond, nil, true) err = <-proxy1ResultChan @@ -676,3 +665,342 @@ func randomIPAddress() string { prng.Range(0, 255), prng.Range(0, 255)) } + +func TestMatcherMultiQueue(t *testing.T) { + err := runTestMatcherMultiQueue() + if err != nil { + t.Errorf(errors.Trace(err).Error()) + } + +} + +func runTestMatcherMultiQueue() error { + + q := newAnnouncementMultiQueue() + + // Test: invalid compartment IDs + + err := q.enqueue(&announcementEntry{ + announcement: &MatchAnnouncement{ + Properties: MatchProperties{}}}) + if err == nil { + return errors.TraceNew("unexpected success") + } + + compartmentID, _ := MakeID() + err = q.enqueue(&announcementEntry{ + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + CommonCompartmentIDs: []ID{compartmentID}, + PersonalCompartmentIDs: []ID{compartmentID}, + }}}) + if err == nil { + return errors.TraceNew("unexpected success") + } + + // Test: enqueue multiple candidates + + var otherCommonCompartmentIDs []ID + var otherPersonalCompartmentIDs []ID + + numOtherCompartmentIDs := 10 + for i := 0; i < numOtherCompartmentIDs; i++ { + commonCompartmentID, _ := MakeID() + otherCommonCompartmentIDs = append( + otherCommonCompartmentIDs, commonCompartmentID) + personalCompartmentID, _ := MakeID() + otherPersonalCompartmentIDs = append( + otherPersonalCompartmentIDs, personalCompartmentID) + } + numOtherEntries := 10000 + for i := 0; i < numOtherEntries; i++ { + ctx, cancel := context.WithDeadline( + context.Background(), time.Now().Add(time.Duration(i+1)*time.Minute)) + defer cancel() + err := q.enqueue(&announcementEntry{ + ctx: ctx, + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + CommonCompartmentIDs: []ID{ + otherCommonCompartmentIDs[i%numOtherCompartmentIDs]}, + NATType: NATTypeSymmetric, + }}}) + if err != nil { + return errors.Trace(err) + } + err = q.enqueue(&announcementEntry{ + ctx: ctx, + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + PersonalCompartmentIDs: []ID{ + otherPersonalCompartmentIDs[i%numOtherCompartmentIDs]}, + NATType: NATTypeSymmetric, + }}}) + if err != nil { + return errors.Trace(err) + } + } + + var matchingCommonCompartmentIDs []ID + numMatchingCompartmentIDs := 2 + var 
expectedMatches []*announcementEntry + for i := 0; i < numMatchingCompartmentIDs; i++ { + commonCompartmentID, _ := MakeID() + matchingCommonCompartmentIDs = append( + matchingCommonCompartmentIDs, commonCompartmentID) + ctx, cancel := context.WithDeadline( + context.Background(), time.Now().Add(time.Duration(i+1)*time.Minute)) + defer cancel() + a := &announcementEntry{ + ctx: ctx, + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + CommonCompartmentIDs: matchingCommonCompartmentIDs[i : i+1], + NATType: NATTypeNone, + }}} + expectedMatches = append(expectedMatches, a) + err := q.enqueue(a) + if err != nil { + return errors.Trace(err) + } + } + + // Test: inspect queue state + + if q.getLen() != numOtherEntries*2+numMatchingCompartmentIDs { + return errors.TraceNew("unexpected total entries count") + } + + if len(q.commonCompartmentQueues) != + numOtherCompartmentIDs+numMatchingCompartmentIDs { + return errors.TraceNew("unexpected compartment queue count") + } + + if len(q.personalCompartmentQueues) != numOtherCompartmentIDs { + return errors.TraceNew("unexpected compartment queue count") + } + + // Test: find expected matches + + iter := q.startMatching(true, matchingCommonCompartmentIDs) + + if len(iter.compartmentQueues) != numMatchingCompartmentIDs { + return errors.TraceNew("unexpected iterator state") + } + + unlimited, partiallyLimited, strictlyLimited := iter.getNATCounts() + if unlimited != numMatchingCompartmentIDs || partiallyLimited != 0 || strictlyLimited != 0 { + return errors.TraceNew("unexpected NAT counts") + } + + match := iter.getNext() + if match == nil { + return errors.TraceNew("unexpected missing match") + } + if match != expectedMatches[0] { + return errors.TraceNew("unexpected match") + } + + if !match.queueReference.dequeue() { + return errors.TraceNew("unexpected already dequeued") + } + + if match.queueReference.dequeue() { + return errors.TraceNew("unexpected not already dequeued") + } + + iter = q.startMatching(true, matchingCommonCompartmentIDs) + + if len(iter.compartmentQueues) != numMatchingCompartmentIDs-1 { + return errors.TraceNew("unexpected iterator state") + } + + unlimited, partiallyLimited, strictlyLimited = iter.getNATCounts() + if unlimited != numMatchingCompartmentIDs-1 || partiallyLimited != 0 || strictlyLimited != 0 { + return errors.TraceNew("unexpected NAT counts") + } + + match = iter.getNext() + if match == nil { + return errors.TraceNew("unexpected missing match") + } + if match != expectedMatches[1] { + return errors.TraceNew("unexpected match") + } + + if !match.queueReference.dequeue() { + return errors.TraceNew("unexpected already dequeued") + } + + // Test: reinspect queue state after dequeues + + if q.getLen() != numOtherEntries*2 { + return errors.TraceNew("unexpected total entries count") + } + + if len(q.commonCompartmentQueues) != numOtherCompartmentIDs { + return errors.TraceNew("unexpected compartment queue count") + } + + if len(q.personalCompartmentQueues) != numOtherCompartmentIDs { + return errors.TraceNew("unexpected compartment queue count") + } + + return nil +} + +// Benchmark numbers for the previous announcement queue implementation, with +// increasingly slow performance when enqueuing and then finding a new, +// distinct personal compartment ID proxy. 
+// +// pkg: github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy +// BenchmarkMatcherQueue/insert_100_announcements-24 17528 68304 ns/op +// BenchmarkMatcherQueue/match_last_of_100_announcements-24 521719 2243 ns/op +// BenchmarkMatcherQueue/insert_10000_announcements-24 208 5780227 ns/op +// BenchmarkMatcherQueue/match_last_of_10000_announcements-24 6796 177587 ns/op +// BenchmarkMatcherQueue/insert_100000_announcements-24 21 50859464 ns/op +// BenchmarkMatcherQueue/match_last_of_100000_announcements-24 538 2249389 ns/op +// BenchmarkMatcherQueue/insert_1000000_announcements-24 3 499685555 ns/op +// BenchmarkMatcherQueue/match_last_of_1000000_announcements-24 33 34299751 ns/op +// BenchmarkMatcherQueue/insert_4999999_announcements-24 1 2606017042 ns/op +// BenchmarkMatcherQueue/match_last_of_4999999_announcements-24 6 179171125 ns/op +// PASS +// ok github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy 17.585s +// +// Benchmark numbers for the current implemention, the announcementMultiQueue, +// with constant time performance for the same scenario: +// +// BenchmarkMatcherQueue +// BenchmarkMatcherQueue/insert_100_announcements-24 15422 77187 ns/op +// BenchmarkMatcherQueue/match_last_of_100_announcements-24 965152 1217 ns/op +// BenchmarkMatcherQueue/insert_10000_announcements-24 168 7322661 ns/op +// BenchmarkMatcherQueue/match_last_of_10000_announcements-24 906748 1211 ns/op +// BenchmarkMatcherQueue/insert_100000_announcements-24 16 64770370 ns/op +// BenchmarkMatcherQueue/match_last_of_100000_announcements-24 972342 1243 ns/op +// BenchmarkMatcherQueue/insert_1000000_announcements-24 2 701046271 ns/op +// BenchmarkMatcherQueue/match_last_of_1000000_announcements-24 988050 1230 ns/op +// BenchmarkMatcherQueue/insert_4999999_announcements-24 1 4523888833 ns/op +// BenchmarkMatcherQueue/match_last_of_4999999_announcements-24 963894 1186 ns/op +// PASS +// ok github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy 22.439s +func BenchmarkMatcherQueue(b *testing.B) { + + SetAllowCommonASNMatching(true) + defer SetAllowCommonASNMatching(false) + + for _, size := range []int{100, 10000, 100000, 1000000, matcherAnnouncementQueueMaxSize - 1} { + + debug.FreeOSMemory() + + var m *Matcher + + commonCompartmentID, _ := MakeID() + + b.Run(fmt.Sprintf("insert %d announcements", size), func(b *testing.B) { + + for i := 0; i < b.N; i++ { + + // Matcher.Start is not called to start the matchWorker; + // instead, matchOffer is invoked directly. 
+ + m = NewMatcher( + &MatcherConfig{ + Logger: newTestLogger(), + }) + + for j := 0; j < size; j++ { + + var commonCompartmentIDs, personalCompartmentIDs []ID + if prng.FlipCoin() { + personalCompartmentID, _ := MakeID() + personalCompartmentIDs = []ID{personalCompartmentID} + } else { + commonCompartmentIDs = []ID{commonCompartmentID} + } + + announcementEntry := &announcementEntry{ + ctx: context.Background(), + limitIP: "127.0.0.1", + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + CommonCompartmentIDs: commonCompartmentIDs, + PersonalCompartmentIDs: personalCompartmentIDs, + GeoIPData: common.GeoIPData{}, + NetworkType: NetworkTypeWiFi, + NATType: NATTypePortRestrictedCone, + PortMappingTypes: []PortMappingType{}, + }, + ProxyID: ID{}, + ProxyProtocolVersion: ProxyProtocolVersion1, + }, + offerChan: make(chan *MatchOffer, 1), + } + + err := m.addAnnouncementEntry(announcementEntry) + if err != nil { + b.Fatalf(errors.Trace(err).Error()) + } + } + } + }) + + b.Run(fmt.Sprintf("match last of %d announcements", size), func(b *testing.B) { + + queueSize := m.announcementQueue.getLen() + if queueSize != size { + b.Fatalf(errors.Tracef("unexpected queue size: %d", queueSize).Error()) + } + + for i := 0; i < b.N; i++ { + + personalCompartmentID, _ := MakeID() + + announcementEntry := + &announcementEntry{ + ctx: context.Background(), + limitIP: "127.0.0.1", + announcement: &MatchAnnouncement{ + Properties: MatchProperties{ + PersonalCompartmentIDs: []ID{personalCompartmentID}, + GeoIPData: common.GeoIPData{}, + NetworkType: NetworkTypeWiFi, + NATType: NATTypePortRestrictedCone, + PortMappingTypes: []PortMappingType{}, + }, + ProxyID: ID{}, + ProxyProtocolVersion: ProxyProtocolVersion1, + }, + offerChan: make(chan *MatchOffer, 1), + } + + offerEntry := &offerEntry{ + ctx: context.Background(), + limitIP: "127.0.0.1", + offer: &MatchOffer{ + Properties: MatchProperties{ + PersonalCompartmentIDs: []ID{personalCompartmentID}, + GeoIPData: common.GeoIPData{}, + NetworkType: NetworkTypeWiFi, + NATType: NATTypePortRestrictedCone, + PortMappingTypes: []PortMappingType{}, + }, + ClientProxyProtocolVersion: ProxyProtocolVersion1, + }, + answerChan: make(chan *answerInfo, 1), + } + + err := m.addAnnouncementEntry(announcementEntry) + if err != nil { + b.Fatalf(errors.Trace(err).Error()) + } + + match, _ := m.matchOffer(offerEntry) + if match == nil { + b.Fatalf(errors.TraceNew("unexpected no match").Error()) + } + + m.removeAnnouncementEntry(false, match) + } + }) + } +} diff --git a/psiphon/common/inproxy/proxy.go b/psiphon/common/inproxy/proxy.go index 05610f7ec..4a33843eb 100644 --- a/psiphon/common/inproxy/proxy.go +++ b/psiphon/common/inproxy/proxy.go @@ -36,6 +36,8 @@ const ( proxyAnnounceDelay = 1 * time.Second proxyAnnounceDelayJitter = 0.5 proxyAnnounceMaxBackoffDelay = 1 * time.Hour + proxyAnnounceLogSampleSize = 2 + proxyAnnounceLogSamplePeriod = 30 * time.Minute proxyWebRTCAnswerTimeout = 20 * time.Second proxyDestinationDialTimeout = 20 * time.Second ) @@ -114,11 +116,11 @@ type ProxyConfig struct { // controlled by tactics parameters. HandleTacticsPayload func(networkID string, tacticsPayload []byte) bool - // OperatorMessageHandler is a callback that is invoked with any user - // message JSON object that is sent to the Proxy from the Broker. This - // facility may be used to alert proxy operators when required. The JSON - // object schema is arbitrary and not defined here. 
- OperatorMessageHandler func(messageJSON string) + // MustUpgrade is a callback that is invoked when a MustUpgrade flag is + // received from the broker. When MustUpgrade is received, the proxy + // should be stopped and the user should be prompted to upgrade before + // restarting the proxy. + MustUpgrade func() // MaxClients is the maximum number of clients that are allowed to connect // to the proxy. @@ -333,20 +335,56 @@ func (p *Proxy) proxyClients( failureDelayFactor := time.Duration(1) - for i := 0; ctx.Err() == nil; i++ { + // To reduce diagnostic log noise, only log an initial sample of + // announcement request timings (delays/elapsed time) and a periodic + // sample of repeating errors such as "no match". + logAnnounceCount := proxyAnnounceLogSampleSize + logErrorsCount := proxyAnnounceLogSampleSize + lastErrMsg := "" + startLogSampleTime := time.Now() + logAnnounce := func() bool { + if logAnnounceCount > 0 { + logAnnounceCount -= 1 + return true + } + return false + } + + for ctx.Err() == nil { if !p.config.WaitForNetworkConnectivity() { break } - backOff, err := p.proxyOneClient(ctx, signalAnnounceDone) + if time.Since(startLogSampleTime) >= proxyAnnounceLogSamplePeriod { + logAnnounceCount = proxyAnnounceLogSampleSize + logErrorsCount = proxyAnnounceLogSampleSize + lastErrMsg = "" + startLogSampleTime = time.Now() + } + + backOff, err := p.proxyOneClient( + ctx, logAnnounce, signalAnnounceDone) if err != nil && ctx.Err() == nil { - p.config.Logger.WithTraceFields( - common.LogFields{ - "error": err.Error(), - }).Error("proxy client failed") + // Limitation: the lastErrMsg string comparison isn't compatible + // with errors with minor variations, such as "unexpected + // response status code %d after %v" from + // InproxyBrokerRoundTripper.RoundTrip, with a time duration in + // the second parameter. + errMsg := err.Error() + if lastErrMsg != errMsg { + logErrorsCount = proxyAnnounceLogSampleSize + lastErrMsg = errMsg + } + if logErrorsCount > 0 { + p.config.Logger.WithTraceFields( + common.LogFields{ + "error": errMsg, + }).Error("proxy client failed") + logErrorsCount -= 1 + } // Apply a simple exponential backoff based on whether // proxyOneClient either relayed client traffic or got no match, @@ -445,7 +483,9 @@ func (p *Proxy) doNetworkDiscovery( } func (p *Proxy) proxyOneClient( - ctx context.Context, signalAnnounceDone func()) (bool, error) { + ctx context.Context, + logAnnounce func() bool, + signalAnnounceDone func()) (bool, error) { // Do not trigger back-off unless the proxy successfully announces and // only then performs poorly. @@ -571,27 +611,24 @@ func (p *Proxy) proxyOneClient( // ProxyAnnounce applies an additional request timeout to facilitate // long-polling. 
announceStartTime := time.Now() + personalCompartmentIDs := brokerCoordinator.PersonalCompartmentIDs() announceResponse, err := brokerClient.ProxyAnnounce( ctx, requestDelay, &ProxyAnnounceRequest{ - PersonalCompartmentIDs: brokerCoordinator.PersonalCompartmentIDs(), + PersonalCompartmentIDs: personalCompartmentIDs, Metrics: metrics, }) - - p.config.Logger.WithTraceFields(common.LogFields{ - "delay": requestDelay.String(), - "elapsedTime": time.Since(announceStartTime).String(), - }).Info("announcement request") - + if logAnnounce() { + p.config.Logger.WithTraceFields(common.LogFields{ + "delay": requestDelay.String(), + "elapsedTime": time.Since(announceStartTime).String(), + }).Info("announcement request") + } if err != nil { return backOff, errors.Trace(err) } - if announceResponse.OperatorMessageJSON != "" { - p.config.OperatorMessageHandler(announceResponse.OperatorMessageJSON) - } - if len(announceResponse.TacticsPayload) > 0 { // The TacticsPayload may include new tactics, or may simply signal, @@ -613,8 +650,8 @@ func (p *Proxy) proxyOneClient( signalAnnounceDone() } - // Trigger back-off back off when rate/entry limited; no back-off for - // no-match. + // Trigger back-off back off when rate/entry limited or must upgrade; no + // back-off for no-match. if announceResponse.Limited { @@ -625,6 +662,14 @@ func (p *Proxy) proxyOneClient( return backOff, errors.TraceNew("no match") + } else if announceResponse.MustUpgrade { + + if p.config.MustUpgrade != nil { + p.config.MustUpgrade() + } + + backOff = true + return backOff, errors.TraceNew("must upgrade") } if announceResponse.ClientProxyProtocolVersion != ProxyProtocolVersion1 { @@ -662,6 +707,10 @@ func (p *Proxy) proxyOneClient( ctx, common.ValueOrDefault(webRTCCoordinator.WebRTCAnswerTimeout(), proxyWebRTCAnswerTimeout)) defer webRTCAnswerCancelFunc() + // In personal pairing mode, RFC 1918/4193 private IP addresses are + // included in SDPs. + hasPersonalCompartmentIDs := len(personalCompartmentIDs) > 0 + webRTCConn, SDP, sdpMetrics, webRTCErr := newWebRTCConnWithAnswer( webRTCAnswerCtx, &webRTCConfig{ @@ -672,7 +721,8 @@ func (p *Proxy) proxyOneClient( DoDTLSRandomization: announceResponse.DoDTLSRandomization, TrafficShapingParameters: announceResponse.TrafficShapingParameters, }, - announceResponse.ClientOfferSDP) + announceResponse.ClientOfferSDP, + hasPersonalCompartmentIDs) var webRTCRequestErr string if webRTCErr != nil { webRTCErr = errors.Trace(webRTCErr) diff --git a/psiphon/common/inproxy/sdp_test.go b/psiphon/common/inproxy/sdp_test.go index a1297f7e9..83ec1f327 100644 --- a/psiphon/common/inproxy/sdp_test.go +++ b/psiphon/common/inproxy/sdp_test.go @@ -23,6 +23,7 @@ package inproxy import ( "context" + "fmt" "net" "strings" "testing" @@ -48,13 +49,19 @@ func runTestProcessSDP() error { }, } + hasPersonalCompartmentIDs := false + errorOnNoCandidates := true + disableIPv6Candidates := false + allowPrivateIPAddressCandidates := false + filterPrivateIPAddressCandidates := false + // Create a valid, base SDP, including private network (bogon) candidates. 
SetAllowBogonWebRTCConnections(true) defer SetAllowBogonWebRTCConnections(false) conn, webRTCSDP, metrics, err := newWebRTCConnWithOffer( - context.Background(), config) + context.Background(), config, hasPersonalCompartmentIDs) if err != nil { return errors.Trace(err) } @@ -64,9 +71,15 @@ func runTestProcessSDP() error { // Test disallow IPv6 + disableIPv6Candidates = true + if metrics.hasIPv6 { preparedSDP, metrics, err := prepareSDPAddresses( - SDP, true, "", true) + SDP, + errorOnNoCandidates, + "", + disableIPv6Candidates, + allowPrivateIPAddressCandidates) if err != nil { return errors.Trace(err) } @@ -87,6 +100,8 @@ func runTestProcessSDP() error { } } + disableIPv6Candidates = false + // Test filter unexpected GeoIP // This IP must not be a bogon; this address is not dialed. @@ -101,13 +116,22 @@ func runTestProcessSDP() error { // Add the testIP as a port mapping candidate. preparedSDP, metrics, err := prepareSDPAddresses( - SDP, true, net.JoinHostPort(testIP, "80"), false) + SDP, + errorOnNoCandidates, + net.JoinHostPort(testIP, "80"), + disableIPv6Candidates, + allowPrivateIPAddressCandidates) if err != nil { return errors.Trace(err) } filteredSDP, metrics, err := filterSDPAddresses( - preparedSDP, true, lookupGeoIP, expectedGeoIP) + preparedSDP, + errorOnNoCandidates, + lookupGeoIP, + expectedGeoIP, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) if err != nil { return errors.Trace(err) } @@ -131,9 +155,16 @@ func runTestProcessSDP() error { SetAllowBogonWebRTCConnections(false) - // Allow no candidates (errorOnNoCandidates = false) + // Allow no candidates + errorOnNoCandidates = false + filteredSDP, metrics, err = filterSDPAddresses( - SDP, false, nil, common.GeoIPData{}) + SDP, + errorOnNoCandidates, + nil, + common.GeoIPData{}, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) if err != nil { return errors.Trace(err) } @@ -149,7 +180,53 @@ func runTestProcessSDP() error { return errors.TraceNew("unexpected filteredICECandidates") } - if len(filteredSDP) >= len(preparedSDP) { + if len(filteredSDP) >= len(SDP) { + return errors.TraceNew("unexpected SDP length") + } + + errorOnNoCandidates = true + + // Test private IP addresses + + SetAllowBogonWebRTCConnections(false) + + hasPersonalCompartmentIDs = true + allowPrivateIPAddressCandidates = true + filterPrivateIPAddressCandidates = true + + conn, webRTCSDP, metrics, err = newWebRTCConnWithOffer( + context.Background(), config, hasPersonalCompartmentIDs) + if err != nil { + return errors.Trace(err) + } + defer conn.Close() + + SDP = []byte(webRTCSDP.SDP) + + hasPrivateIP := metrics.hasPrivateIP + + if !hasPrivateIP { + // Test may run on host without RFC 1918/4193 private IP address + fmt.Printf("No private IP address\n") + } + + // Filter should retain any private IP address(es) + filteredSDP, metrics, err = filterSDPAddresses( + SDP, + errorOnNoCandidates, + nil, + common.GeoIPData{}, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) + if err != nil { + return errors.Trace(err) + } + + if hasPrivateIP != metrics.hasPrivateIP { + return errors.TraceNew("unexpected metrics.hasPrivateIP") + } + + if len(filteredSDP) != len(SDP) { return errors.TraceNew("unexpected SDP length") } diff --git a/psiphon/common/inproxy/webrtc.go b/psiphon/common/inproxy/webrtc.go index c5a2abfcb..d26ff101a 100644 --- a/psiphon/common/inproxy/webrtc.go +++ b/psiphon/common/inproxy/webrtc.go @@ -153,10 +153,12 @@ type webRTCConfig struct { // establishment. 
func newWebRTCConnWithOffer( ctx context.Context, - config *webRTCConfig) ( + config *webRTCConfig, + hasPersonalCompartmentIDs bool) ( *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { - conn, SDP, metrics, err := newWebRTCConn(ctx, config, nil) + conn, SDP, metrics, err := newWebRTCConn( + ctx, config, nil, hasPersonalCompartmentIDs) if err != nil { return nil, WebRTCSessionDescription{}, nil, errors.Trace(err) } @@ -170,10 +172,12 @@ func newWebRTCConnWithOffer( func newWebRTCConnWithAnswer( ctx context.Context, config *webRTCConfig, - peerSDP WebRTCSessionDescription) ( + peerSDP WebRTCSessionDescription, + hasPersonalCompartmentIDs bool) ( *webRTCConn, WebRTCSessionDescription, *webRTCSDPMetrics, error) { - conn, SDP, metrics, err := newWebRTCConn(ctx, config, &peerSDP) + conn, SDP, metrics, err := newWebRTCConn( + ctx, config, &peerSDP, hasPersonalCompartmentIDs) if err != nil { return nil, WebRTCSessionDescription{}, nil, errors.Trace(err) } @@ -183,7 +187,8 @@ func newWebRTCConnWithAnswer( func newWebRTCConn( ctx context.Context, config *webRTCConfig, - peerSDP *WebRTCSessionDescription) ( + peerSDP *WebRTCSessionDescription, + hasPersonalCompartmentIDs bool) ( retconn *webRTCConn, retSDP *WebRTCSessionDescription, retMetrics *webRTCSDPMetrics, @@ -628,9 +633,33 @@ func newWebRTCConn( } else { + SDP := peerSDP.SDP + if hasPersonalCompartmentIDs { + + // In personal pairing mode, the peer SDP may include private IP + // addresses. To avoid unnecessary network traffic, filter out + // any peer private IP addresses for which there is no + // corresponding local, active interface. + + errorOnNoCandidates := false + allowPrivateIPAddressCandidates := true + filterPrivateIPAddressCandidates := true + adjustedSDP, _, err := filterSDPAddresses( + []byte(peerSDP.SDP), + errorOnNoCandidates, + nil, + common.GeoIPData{}, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) + if err != nil { + return nil, nil, nil, errors.Trace(err) + } + SDP = string(adjustedSDP) + } + pionSessionDescription := webrtc.SessionDescription{ Type: webrtc.SDPType(peerSDP.Type), - SDP: peerSDP.SDP, + SDP: SDP, } err = conn.peerConnection.SetRemoteDescription(pionSessionDescription) @@ -701,7 +730,8 @@ func newWebRTCConn( []byte(localDescription.SDP), errorOnNoCandidates, portMappingExternalAddr, - config.WebRTCDialCoordinator.DisableIPv6ICECandidates()) + config.WebRTCDialCoordinator.DisableIPv6ICECandidates(), + hasPersonalCompartmentIDs) if err != nil { return nil, nil, nil, errors.Trace(err) } @@ -760,13 +790,40 @@ func (conn *webRTCConn) setDataChannel(dataChannel *webrtc.DataChannel) { // SetRemoteSDP takes the answer SDP that is received in response to an offer // SDP. SetRemoteSDP initiates the WebRTC connection establishment on the // offer end. -func (conn *webRTCConn) SetRemoteSDP(peerSDP WebRTCSessionDescription) error { +func (conn *webRTCConn) SetRemoteSDP( + peerSDP WebRTCSessionDescription, + hasPersonalCompartmentIDs bool) error { + conn.mutex.Lock() defer conn.mutex.Unlock() + SDP := peerSDP.SDP + if hasPersonalCompartmentIDs { + + // In personal pairing mode, the peer SDP may include private IP + // addresses. To avoid unnecessary network traffic, filter out any + // peer private IP addresses for which there is no corresponding + // local, active interface. 
+ + errorOnNoCandidates := false + allowPrivateIPAddressCandidates := true + filterPrivateIPAddressCandidates := true + adjustedSDP, _, err := filterSDPAddresses( + []byte(peerSDP.SDP), + errorOnNoCandidates, + nil, + common.GeoIPData{}, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates) + if err != nil { + return errors.Trace(err) + } + SDP = string(adjustedSDP) + } + pionSessionDescription := webrtc.SessionDescription{ Type: webrtc.SDPType(peerSDP.Type), - SDP: peerSDP.SDP, + SDP: SDP, } err := conn.peerConnection.SetRemoteDescription(pionSessionDescription) @@ -919,8 +976,14 @@ func (conn *webRTCConn) recordSelectedICECandidateStats() error { if localIP != nil && localIP.To4() == nil { isIPv6 = "1" } + isPrivate := "0" + if localIP != nil && localIP.IsPrivate() { + isPrivate = "1" + } conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_is_IPv6"] = isIPv6 + conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_is_private_IP"] = + isPrivate conn.iceCandidatePairMetrics["inproxy_webrtc_local_ice_candidate_port"] = localCandidateStats.Port @@ -931,8 +994,14 @@ func (conn *webRTCConn) recordSelectedICECandidateStats() error { if remoteIP != nil && remoteIP.To4() == nil { isIPv6 = "1" } + isPrivate = "0" + if remoteIP != nil && remoteIP.IsPrivate() { + isPrivate = "1" + } conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_is_IPv6"] = isIPv6 + conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_is_private_IP"] = + isPrivate conn.iceCandidatePairMetrics["inproxy_webrtc_remote_ice_candidate_port"] = remoteCandidateStats.Port @@ -1516,13 +1585,16 @@ func prepareSDPAddresses( encodedSDP []byte, errorOnNoCandidates bool, portMappingExternalAddr string, - disableIPv6Candidates bool) ([]byte, *webRTCSDPMetrics, error) { + disableIPv6Candidates bool, + allowPrivateIPAddressCandidates bool) ([]byte, *webRTCSDPMetrics, error) { modifiedSDP, metrics, err := processSDPAddresses( encodedSDP, + errorOnNoCandidates, portMappingExternalAddr, disableIPv6Candidates, - errorOnNoCandidates, + allowPrivateIPAddressCandidates, + false, nil, common.GeoIPData{}) return modifiedSDP, metrics, errors.Trace(err) @@ -1536,13 +1608,17 @@ func filterSDPAddresses( encodedSDP []byte, errorOnNoCandidates bool, lookupGeoIP LookupGeoIP, - expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) { + expectedGeoIPData common.GeoIPData, + allowPrivateIPAddressCandidates bool, + filterPrivateIPAddressCandidates bool) ([]byte, *webRTCSDPMetrics, error) { filteredSDP, metrics, err := processSDPAddresses( encodedSDP, + errorOnNoCandidates, "", false, - errorOnNoCandidates, + allowPrivateIPAddressCandidates, + filterPrivateIPAddressCandidates, lookupGeoIP, expectedGeoIPData) return filteredSDP, metrics, errors.Trace(err) @@ -1552,6 +1628,7 @@ func filterSDPAddresses( type webRTCSDPMetrics struct { iceCandidateTypes []ICECandidateType hasIPv6 bool + hasPrivateIP bool filteredICECandidates []string } @@ -1594,9 +1671,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ func processSDPAddresses( encodedSDP []byte, + errorOnNoCandidates bool, portMappingExternalAddr string, disableIPv6Candidates bool, - errorOnNoCandidates bool, + allowPrivateIPAddressCandidates bool, + filterPrivateIPAddressCandidates bool, lookupGeoIP LookupGeoIP, expectedGeoIPData common.GeoIPData) ([]byte, *webRTCSDPMetrics, error) { @@ -1608,6 +1687,7 @@ func processSDPAddresses( candidateTypes := map[ICECandidateType]bool{} hasIPv6 := false + hasPrivateIP := false filteredCandidateReasons := make(map[string]int) var portMappingICECandidates []sdp.Attribute @@ -1703,14 +1783,21 @@ func processSDPAddresses( candidateIsIPv6 = true } - // Strip non-routable bogons, including LAN addresses. - // Same-LAN client/proxy hops are not expected to be useful, - // and this also avoids unnecessary local network traffic. + // Strip non-routable bogons, including RFC 1918/4193 private + // IP addresses. Same-LAN client/proxy hops are not expected + // to be useful, and this also avoids unnecessary network traffic. // // Well-behaved clients and proxies should strip these values; // the broker enforces this with filtering. + // + // In personal pairing mode, private IP addresses are allowed, + // as connections may be made between devices on the same LAN and + // not all routers support NAT hairpinning. + + candidateIsPrivateIP := candidateIP.IsPrivate() if !GetAllowBogonWebRTCConnections() && + !(candidateIsPrivateIP && allowPrivateIPAddressCandidates) && common.IsBogon(candidateIP) { version := "IPv4" @@ -1723,6 +1810,18 @@ func processSDPAddresses( continue } + // In personal pairing mode, filter out any private IP + // addresses for which there is no corresponding local, + // active interface. This avoids unnecessary network traffic. + // This filtering option is applied post-broker exchange, + // with the SDP received, via the broker, from the peer. + + if candidateIsPrivateIP && filterPrivateIPAddressCandidates { + if !hasInterfaceForPrivateIPAddress(candidateIP) { + continue + } + } + + // The broker will check that clients and proxies specify only // candidates that map to the same GeoIP country and ASN as // the client/proxy connection to the broker. This limits @@ -1765,6 +1864,9 @@ func processSDPAddresses( if candidateIsIPv6 { hasIPv6 = true } + if candidateIsPrivateIP { + hasPrivateIP = true + } // These types are not reported: // - CandidateTypeRelay: TURN servers are not used. @@ -1797,7 +1899,8 @@ func processSDPAddresses( } metrics := &webRTCSDPMetrics{ - hasIPv6: hasIPv6, + hasIPv6: hasIPv6, + hasPrivateIP: hasPrivateIP, } for candidateType := range candidateTypes { metrics.iceCandidateTypes = append(metrics.iceCandidateTypes, candidateType) @@ -1833,6 +1936,7 @@ type pionLogger struct { scope string logger common.Logger debugLogging bool + warnNoPairs int32 } func newPionLogger(scope string, logger common.Logger, debugLogging bool) *pionLogger { @@ -1880,6 +1984,13 @@ func (l *pionLogger) Infof(format string, args ...interface{}) { } func (l *pionLogger) Warn(msg string) { + + // To reduce diagnostic log noise, only log this message once per dial attempt. + if msg == "Failed to ping without candidate pairs. Connection is not possible yet." 
&& + !atomic.CompareAndSwapInt32(&l.warnNoPairs, 0, 1) { + return + } + + l.logger.WithTrace().Warning(fmt.Sprintf("webRTC: %s: %s", l.scope, msg)) } @@ -1895,6 +2006,48 @@ func (l *pionLogger) Errorf(format string, args ...interface{}) { l.logger.WithTrace().Error(fmt.Sprintf("webRTC: %s: %s", l.scope, fmt.Sprintf(format, args...))) } +func hasInterfaceForPrivateIPAddress(IP net.IP) bool { + + if !IP.IsPrivate() { + return false + } + + // The anet package is used to work around net.Interfaces not working on + // Android at this time: https://github.com/golang/go/issues/40569. + // + // Any errors are silently dropped; the caller will proceed without using + // the input private IP; and equivalent anet calls are made in + // pionNetwork.Interfaces, with errors logged. + + netInterfaces, err := anet.Interfaces() + if err != nil { + return false + } + + for _, netInterface := range netInterfaces { + // Note: don't exclude interfaces with the net.FlagPointToPoint flag, + // which is set for certain mobile networks + if netInterface.Flags&net.FlagUp == 0 { + continue + } + addrs, err := anet.InterfaceAddrsByInterface(&netInterface) + if err != nil { + continue + } + for _, addr := range addrs { + _, IPNet, err := net.ParseCIDR(addr.String()) + if err != nil { + continue + } + if IPNet.Contains(IP) { + return true + } + } + } + + return false +} + // pionNetwork implements pion/transport.Net. // // Via the SettingsEngine, pion is configured to use a pionNetwork instance, @@ -1936,9 +2089,6 @@ func (p *pionNetwork) Interfaces() ([]*transport.Interface, error) { // should be the active, externally routable addresses, and the IPv6 // address should be the preferred, non-deprecated temporary IPv6 address. // - // The anet package is used to work around net.Interfaces not working on - // Android at this time: https://github.com/golang/go/issues/40569. - // // In post-ICE gathering processing, processSDPAddresses will also strip // all bogon addresses, so there is no explicit bogon check here. // @@ -1971,10 +2121,12 @@ func (p *pionNetwork) Interfaces() ([]*transport.Interface, error) { udpConnIPv6.Close() } + // The anet package is used to work around net.Interfaces not working on + // Android at this time: https://github.com/golang/go/issues/40569. 
+ transportInterfaces := []*transport.Interface{} netInterfaces, err := anet.Interfaces() - if err != nil { return nil, errors.Trace(err) } diff --git a/psiphon/common/parameters/parameters.go b/psiphon/common/parameters/parameters.go index 48d4e2b3e..18ccb36a1 100644 --- a/psiphon/common/parameters/parameters.go +++ b/psiphon/common/parameters/parameters.go @@ -375,8 +375,11 @@ const ( InproxyTunnelProtocolSelectionProbability = "InproxyTunnelProtocolSelectionProbability" InproxyAllBrokerPublicKeys = "InproxyAllBrokerPublicKeys" InproxyBrokerSpecs = "InproxyBrokerSpecs" + InproxyPersonalPairingBrokerSpecs = "InproxyPersonalPairingBrokerSpecs" InproxyProxyBrokerSpecs = "InproxyProxyBrokerSpecs" + InproxyProxyPersonalPairingBrokerSpecs = "InproxyProxyPersonalPairingBrokerSpecs" InproxyClientBrokerSpecs = "InproxyClientBrokerSpecs" + InproxyClientPersonalPairingBrokerSpecs = "InproxyClientPersonalPairingBrokerSpecs" InproxyReplayBrokerDialParametersTTL = "InproxyReplayBrokerDialParametersTTL" InproxyReplayBrokerUpdateFrequency = "InproxyReplayBrokerUpdateFrequency" InproxyReplayBrokerDialParametersProbability = "InproxyReplayBrokerDialParametersProbability" @@ -434,6 +437,9 @@ const ( InproxyProxyDestinationDialTimeout = "InproxyProxyDestinationDialTimeout" InproxyPsiphonAPIRequestTimeout = "InproxyPsiphonAPIRequestTimeout" InproxyProxyTotalActivityNoticePeriod = "InproxyProxyTotalActivityNoticePeriod" + InproxyPersonalPairingConnectionWorkerPoolSize = "InproxyPersonalPairingConnectionWorkerPoolSize" + InproxyClientDialRateLimitQuantity = "InproxyClientDialRateLimitQuantity" + InproxyClientDialRateLimitInterval = "InproxyClientDialRateLimitInterval" // Retired parameters @@ -867,8 +873,11 @@ var defaultParameters = map[string]struct { InproxyTunnelProtocolSelectionProbability: {value: 0.5, minimum: 0.0}, InproxyAllBrokerPublicKeys: {value: []string{}, flags: serverSideOnly}, InproxyBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyPersonalPairingBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, InproxyProxyBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyProxyPersonalPairingBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, InproxyClientBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, + InproxyClientPersonalPairingBrokerSpecs: {value: InproxyBrokerSpecsValue{}}, InproxyReplayBrokerDialParametersTTL: {value: 24 * time.Hour, minimum: time.Duration(0)}, InproxyReplayBrokerUpdateFrequency: {value: 5 * time.Minute, minimum: time.Duration(0)}, InproxyReplayBrokerDialParametersProbability: {value: 1.0, minimum: 0.0}, @@ -882,7 +891,7 @@ var defaultParameters = map[string]struct { InproxyBrokerMatcherAnnouncementNonlimitedProxyIDs: {value: []string{}, flags: serverSideOnly}, InproxyBrokerMatcherOfferLimitEntryCount: {value: 10, minimum: 0, flags: serverSideOnly}, InproxyBrokerMatcherOfferRateLimitQuantity: {value: 50, minimum: 0, flags: serverSideOnly}, - InproxyBrokerMatcherOfferRateLimitInterval: {value: 1 * time.Minute, minimum: time.Duration(0)}, + InproxyBrokerMatcherOfferRateLimitInterval: {value: 1 * time.Minute, minimum: time.Duration(0), flags: serverSideOnly}, InproxyBrokerProxyAnnounceTimeout: {value: 2 * time.Minute, minimum: time.Duration(0), flags: serverSideOnly}, InproxyBrokerClientOfferTimeout: {value: 10 * time.Second, minimum: time.Duration(0), flags: serverSideOnly}, InproxyBrokerPendingServerRequestsTTL: {value: 60 * time.Second, minimum: time.Duration(0), flags: serverSideOnly}, @@ -926,6 +935,9 @@ var defaultParameters = map[string]struct { 
InproxyProxyDestinationDialTimeout: {value: 20 * time.Second, minimum: time.Duration(0), flags: useNetworkLatencyMultiplier}, InproxyPsiphonAPIRequestTimeout: {value: 10 * time.Second, minimum: 1 * time.Second, flags: useNetworkLatencyMultiplier}, InproxyProxyTotalActivityNoticePeriod: {value: 5 * time.Minute, minimum: 1 * time.Second}, + InproxyPersonalPairingConnectionWorkerPoolSize: {value: 2, minimum: 1}, + InproxyClientDialRateLimitQuantity: {value: 10, minimum: 0}, + InproxyClientDialRateLimitInterval: {value: 1 * time.Minute, minimum: time.Duration(0)}, } // IsServerSideOnly indicates if the parameter specified by name is used diff --git a/psiphon/common/protocol/packed.go b/psiphon/common/protocol/packed.go index 8c1bc2f7f..8d2b2ecdc 100644 --- a/psiphon/common/protocol/packed.go +++ b/psiphon/common/protocol/packed.go @@ -792,7 +792,10 @@ func init() { {142, "statusData", rawJSONConverter}, - // Last key value = 142 + {143, "inproxy_webrtc_local_ice_candidate_is_private_IP", intConverter}, + {144, "inproxy_webrtc_remote_ice_candidate_is_private_IP", intConverter}, + + // Next key value = 145 } for _, spec := range packedAPIParameterSpecs { diff --git a/psiphon/common/protocol/protocol.go b/psiphon/common/protocol/protocol.go index e86046ea5..2abf2e157 100644 --- a/psiphon/common/protocol/protocol.go +++ b/psiphon/common/protocol/protocol.go @@ -146,7 +146,7 @@ func (t TunnelProtocols) PruneInvalid() TunnelProtocols { return u } -func (t TunnelProtocols) OnlyInproxyTunnelProtocols() TunnelProtocols { +func (t TunnelProtocols) PruneNonInproxyTunnelProtocols() TunnelProtocols { u := make(TunnelProtocols, 0) for _, p := range t { if TunnelProtocolUsesInproxy(p) { @@ -156,6 +156,15 @@ func (t TunnelProtocols) OnlyInproxyTunnelProtocols() TunnelProtocols { return u } +func (t TunnelProtocols) IsOnlyInproxyTunnelProtocols() bool { + for _, p := range t { + if !TunnelProtocolUsesInproxy(p) { + return false + } + } + return true +} + type LabeledTunnelProtocols map[string]TunnelProtocols func (labeledProtocols LabeledTunnelProtocols) Validate() error { diff --git a/psiphon/common/reloader.go b/psiphon/common/reloader.go index f21ae7580..99dae3998 100644 --- a/psiphon/common/reloader.go +++ b/psiphon/common/reloader.go @@ -62,11 +62,11 @@ type Reloader interface { // // reloadAction must ensure that data structures revert to their previous state when // a reload fails. -// type ReloadableFile struct { sync.RWMutex filename string loadFileContent bool + hasLoaded bool checksum uint64 reloadAction func([]byte, time.Time) error } @@ -122,6 +122,7 @@ func (reloadable *ReloadableFile) Reload() (bool, error) { reloadable.RLock() filename := reloadable.filename + hasLoaded := reloadable.hasLoaded previousChecksum := reloadable.checksum reloadable.RUnlock() @@ -148,7 +149,7 @@ func (reloadable *ReloadableFile) Reload() (bool, error) { checksum := hash.Sum64() - if checksum == previousChecksum { + if hasLoaded && checksum == previousChecksum { return false, nil } @@ -181,6 +182,7 @@ func (reloadable *ReloadableFile) Reload() (bool, error) { return false, errors.Trace(err) } + reloadable.hasLoaded = true reloadable.checksum = checksum return true, nil diff --git a/psiphon/common/tactics/tactics.go b/psiphon/common/tactics/tactics.go index 58c711eb2..cc0503a27 100644 --- a/psiphon/common/tactics/tactics.go +++ b/psiphon/common/tactics/tactics.go @@ -112,12 +112,6 @@ tactics. 
Each time the tactics changes, this process is repeated so that obsolete tactics parameters are not retained in the client's Parameters instance. -Tactics has a probability parameter that is used in a weighted coin flip to -determine if the tactics is to be applied or skipped for the current client -session. This allows for experimenting with provisional tactics; and obtaining -non-tactic sample metrics in situations which would otherwise always use a -tactic. - Speed test data is used in filtered tactics for selection of parameters such as timeouts. @@ -217,8 +211,8 @@ var ( // matching filter are merged into the client tactics. // // The merge operation replaces any existing item in Parameter with a Parameter specified in -// the newest matching tactics. The TTL and Probability of the newest matching tactics is taken, -// although all but the DefaultTactics can omit the TTL and Probability fields. +// the newest matching tactics. The TTL of the newest matching tactics is taken, although all +// but the DefaultTactics can omit the TTL field. type Server struct { common.ReloadableFile @@ -232,7 +226,7 @@ type Server struct { RequestObfuscatedKey []byte // DefaultTactics is the baseline tactics for all clients. It must include a - // TTL and Probability. + // TTL. DefaultTactics Tactics // FilteredTactics is an ordered list of filter/tactics pairs. For a client, @@ -363,8 +357,10 @@ type Tactics struct { // no tactics data when the tag is unchanged. TTL string - // Probability specifies the probability [0.0 - 1.0] with which - // the client should apply the tactics in a new session. + // Probability is an obsolete field which is no longer used, as overall + // tactics are now applied unconditionally; but it must be present, and + // greater than zero, in marshaled tactics, sent by the server, for + // compatibility with legacy client tactics validation. Probability float64 // Parameters specify client parameters to override. These must @@ -540,13 +536,6 @@ func (server *Server) Validate() error { tactics.TTL = "" } - if (validatingDefault && tactics.Probability == 0.0) || - tactics.Probability < 0.0 || - tactics.Probability > 1.0 { - - return errors.TraceNew("invalid probability") - } - params, err := parameters.NewParameters(nil) if err != nil { return errors.Trace(err) @@ -960,6 +949,9 @@ func (server *Server) GetTactics( // Continue to apply more matches. Last matching tactics has priority for any field. } + // See Tactics.Probability doc comment. + tactics.Probability = 1.0 + return tactics, nil } @@ -1079,8 +1071,7 @@ func medianSampleRTTMilliseconds(samples []SpeedTestSample) int { func (t *Tactics) clone(includeServerSideOnly bool) *Tactics { u := &Tactics{ - TTL: t.TTL, - Probability: t.Probability, + TTL: t.TTL, } // Note: there is no deep copy of parameter values; the the returned @@ -1104,10 +1095,6 @@ func (t *Tactics) merge(includeServerSideOnly bool, u *Tactics) { t.TTL = u.TTL } - if u.Probability != 0.0 { - t.Probability = u.Probability - } - // Note: there is no deep copy of parameter values; the the returned // Tactics shares memory with the original and its individual parameters // should not be modified. @@ -1744,9 +1731,6 @@ func applyTacticsPayload( if ttl <= 0 { return newTactics, errors.TraceNew("invalid TTL") } - if record.Tactics.Probability <= 0.0 { - return newTactics, errors.TraceNew("invalid probability") - } // Set or extend the expiry. 
diff --git a/psiphon/common/tactics/tactics_test.go b/psiphon/common/tactics/tactics_test.go index ff268d277..de189b82e 100644 --- a/psiphon/common/tactics/tactics_test.go +++ b/psiphon/common/tactics/tactics_test.go @@ -56,7 +56,6 @@ func TestTactics(t *testing.T) { "RequestObfuscatedKey" : "%s", "DefaultTactics" : { "TTL" : "1s", - "Probability" : %0.1f, "Parameters" : { "NetworkLatencyMultiplier" : %0.1f, "ServerPacketManipulationSpecs" : [{"Name": "test-packetman-spec", "PacketSpecs": [["TCP-flags S"]]}] @@ -126,7 +125,6 @@ func TestTactics(t *testing.T) { t.Fatalf("GenerateKeys failed: %s", err) } - tacticsProbability := 0.5 tacticsNetworkLatencyMultiplier := 2.0 tacticsConnectionWorkerPoolSize := 5 tacticsLimitTunnelProtocols := protocol.TunnelProtocols{"OSSH", "SSH"} @@ -139,7 +137,6 @@ func TestTactics(t *testing.T) { encodedRequestPublicKey, encodedRequestPrivateKey, encodedObfuscatedKey, - tacticsProbability, tacticsNetworkLatencyMultiplier, tacticsConnectionWorkerPoolSize, jsonTacticsLimitTunnelProtocols, @@ -300,10 +297,6 @@ func TestTactics(t *testing.T) { t.Fatalf("NewParameters failed: %s", err) } - if r.Tactics.Probability != tacticsProbability { - t.Fatalf("Unexpected probability: %f", r.Tactics.Probability) - } - // ValidationSkipOnError is set for Psiphon clients counts, err := p.Set(r.Tag, parameters.ValidationSkipOnError, r.Tactics.Parameters) if err != nil { @@ -462,7 +455,6 @@ func TestTactics(t *testing.T) { encodedRequestPublicKey, encodedRequestPrivateKey, encodedObfuscatedKey, - tacticsProbability, tacticsNetworkLatencyMultiplier, tacticsConnectionWorkerPoolSize, jsonTacticsLimitTunnelProtocols, @@ -689,7 +681,6 @@ func TestTactics(t *testing.T) { "", "", "", - tacticsProbability, tacticsNetworkLatencyMultiplier, tacticsConnectionWorkerPoolSize, jsonTacticsLimitTunnelProtocols, @@ -741,8 +732,7 @@ func TestTacticsFilterGeoIPScope(t *testing.T) { "RequestPrivateKey" : "%s", "RequestObfuscatedKey" : "%s", "DefaultTactics" : { - "TTL" : "60s", - "Probability" : 1.0 + "TTL" : "60s" }, %%s } diff --git a/psiphon/config.go b/psiphon/config.go index b5e30e333..8d5e11736 100755 --- a/psiphon/config.go +++ b/psiphon/config.go @@ -34,6 +34,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "unicode" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" @@ -226,7 +227,7 @@ type Config struct { EstablishTunnelServerAffinityGracePeriodMilliseconds *int // ConnectionWorkerPoolSize specifies how many connection attempts to - // attempt in parallel. If omitted of when 0, a default is used; this is + // attempt in parallel. If omitted or when 0, a default is used; this is // recommended. ConnectionWorkerPoolSize int @@ -648,7 +649,9 @@ type Config struct { // distributed from proxy operators to client users out-of-band and // provide a mechanism to allow only certain clients to use a proxy. // - // See InproxyClientPersonalCompartmentIDs comment for limitations. + // Limitation: currently, at most 1 personal compartment may be specified. + // See InproxyClientPersonalCompartmentIDs comment for additional + // personal pairing limitations. InproxyProxyPersonalCompartmentIDs []string // InproxyClientPersonalCompartmentIDs specifies the personal compartment @@ -663,43 +666,28 @@ type Config struct { // // Limitations: // - // - While fully functional, the personal pairing mode has a number of - // limitations that make the current implementation less suitable for - // large scale deployment. 
+ // While fully functional, the personal pairing mode has a number of + // limitations that make the current implementation less suitable for + // large scale deployment. // - // - Since the mode requires an in-proxy connection to a proxy, announcing - // with the corresponding personal compartment ID, not only must that - // proxy be available, but also a broker, and both the client and proxy - // must rendezvous at the same broker. + // Since the mode requires an in-proxy connection to a proxy, announcing + // with the corresponding personal compartment ID, not only must that + // proxy be available, but also a broker, and both the client and proxy + // must rendezvous at the same broker. // - // - Currently, the client tunnel establishment algorithm does not launch - // an untunneled tactics request as long as there is a cached tactics - // with a valid TTL. The assumption, in regular mode, is that the - // cached tactics will suffice, and any new tactics will be obtained - // from any Psiphon server connection. Since broker specs are obtained - // solely from tactics, if brokers are removed, reconfigured, or even - // if the order is changed, personal mode may fail to connect until - // cached tactics expire. - // - // - In personal mode, clients and proxies use a simplistic approach to - // rendezvous: always select the first broker spec. This works, but is - // not robust in terms of load balancing, and fails if the first broker - // is unreachable or overloaded. Non-personal in-proxy dials can simply - // use any available broker. - // - // - The broker matching queues lack compartment ID indexing. For a - // handful of common compartment IDs, this is not expected to be an - // issue. For personal compartment IDs, this may lead to frequency - // near-full scans of the queues when looking for a match. - // - // - In personal mode, all establishment candidates must be in-proxy - // dials, all using the same broker. Many concurrent, fronted broker - // requests may result in CDN rate limiting, requiring some mechanism - // to delay or spread the requests, as is currently done only for - // batches of proxy announcements. + // In personal mode, clients and proxies use a simplistic approach to + // rendezvous: always select the first broker spec. This works, but is + // not robust in terms of load balancing, and fails if the first broker + // is unreachable or overloaded. Non-personal in-proxy dials can simply + // use any available broker. // InproxyClientPersonalCompartmentIDs []string + // InproxyPersonalPairingConnectionWorkerPoolSize specifies the value for + // ConnectionWorkerPoolSize in personal pairing mode. If omitted or when + // 0, a default is used; this is recommended. + InproxyPersonalPairingConnectionWorkerPoolSize int + // EmitInproxyProxyActivity indicates whether to emit frequent notices // showing proxy connection information and bytes transferred. 
EmitInproxyProxyActivity bool @@ -1002,8 +990,11 @@ type Config struct { InproxyAllowClient *bool InproxyTunnelProtocolSelectionProbability *float64 InproxyBrokerSpecs parameters.InproxyBrokerSpecsValue - InproxyClientBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyPersonalPairingBrokerSpecs parameters.InproxyBrokerSpecsValue InproxyProxyBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyProxyPersonalPairingBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyClientBrokerSpecs parameters.InproxyBrokerSpecsValue + InproxyClientPersonalPairingBrokerSpecs parameters.InproxyBrokerSpecsValue InproxyReplayBrokerDialParametersTTLSeconds *int InproxyReplayBrokerUpdateFrequencySeconds *int InproxyReplayBrokerDialParametersProbability *float64 @@ -1048,6 +1039,8 @@ type Config struct { InproxyProxyDestinationDialTimeoutMilliseconds *int InproxyPsiphonAPIRequestTimeoutMilliseconds *int InproxyProxyTotalActivityNoticePeriodMilliseconds *int + InproxyClientDialRateLimitQuantity *int + InproxyClientDialRateLimitIntervalMilliseconds *int InproxySkipAwaitFullyConnected bool InproxyEnableWebRTCDebugLogging bool @@ -1080,6 +1073,10 @@ type Config struct { tacticsAppliedReceiversMutex sync.Mutex tacticsAppliedReceivers []TacticsAppliedReceiver + + signalComponentFailure atomic.Value + + inproxyMustUpgradePosted int32 } // TacticsAppliedReceiver specifies the interface for a component that is @@ -1121,6 +1118,8 @@ func LoadConfig(configJson []byte) (*Config, error) { config.loadTimestamp = common.TruncateTimestampToHour( common.GetCurrentTimestamp()) + config.signalComponentFailure.Store(func() {}) + return &config, nil } @@ -1408,7 +1407,9 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { return errors.TraceNew("invalid ObfuscatedSSHAlgorithms") } - if !config.DisableTunnels && config.InproxyEnableProxy && + if !config.DisableTunnels && + config.InproxyEnableProxy && + !GetAllowOverlappingPersonalCompartmentIDs() && common.ContainsAny( config.InproxyProxyPersonalCompartmentIDs, config.InproxyClientPersonalCompartmentIDs) { @@ -1417,6 +1418,10 @@ func (config *Config) Commit(migrateFromLegacyFields bool) error { return errors.TraceNew("invalid overlapping personal compartment IDs") } + if len(config.InproxyProxyPersonalCompartmentIDs) > 1 { + return errors.TraceNew("invalid proxy personal compartment ID count") + } + // This constraint is expected by logic in Controller.runTunnels(). if config.PacketTunnelTunFileDescriptor > 0 && config.TunnelPoolSize != 1 { @@ -1786,6 +1791,36 @@ func (config *Config) GetNetworkID() string { return config.networkIDGetter.GetNetworkID() } +func (config *Config) SetSignalComponentFailure(signalComponentFailure func()) { + config.signalComponentFailure.Store(signalComponentFailure) +} + +// IsInproxyPersonalPairingMode indicates that the client is in in-proxy +// personal pairing mode, where connections are made only through in-proxy +// proxies with corresponding personal compartment IDs. +func (config *Config) IsInproxyPersonalPairingMode() bool { + return len(config.InproxyClientPersonalCompartmentIDs) > 0 +} + +// OnInproxyMustUpgrade is invoked when the in-proxy broker returns the +// MustUpgrade response. When either running a proxy, or when running a +// client in personal-pairing mode -- two states that require in-proxy +// functionality -- onInproxyMustUpgrade initiates a shutdown after emitting +// the InproxyMustUpgrade notice. 
+func (config *Config) OnInproxyMustUpgrade() { + + // TODO: check if LimitTunnelProtocols is set to allow only INPROXY tunnel + // protocols; this is another case where in-proxy functionality is + // required. + + if config.InproxyEnableProxy || config.IsInproxyPersonalPairingMode() { + if atomic.CompareAndSwapInt32(&config.inproxyMustUpgradePosted, 0, 1) { + NoticeInproxyMustUpgrade() + } + config.signalComponentFailure.Load().(func())() + } +} + func (config *Config) makeConfigParameters() map[string]interface{} { // Build set of config values to apply to parameters. @@ -2375,6 +2410,10 @@ func (config *Config) makeConfigParameters() map[string]interface{} { applyParameters[parameters.SteeringIPProbability] = *config.SteeringIPProbability } + if config.InproxyPersonalPairingConnectionWorkerPoolSize != 0 { + applyParameters[parameters.InproxyPersonalPairingConnectionWorkerPoolSize] = config.InproxyPersonalPairingConnectionWorkerPoolSize + } + if config.InproxyAllowProxy != nil { applyParameters[parameters.InproxyAllowProxy] = *config.InproxyAllowProxy } @@ -2391,14 +2430,26 @@ func (config *Config) makeConfigParameters() map[string]interface{} { applyParameters[parameters.InproxyBrokerSpecs] = config.InproxyBrokerSpecs } + if len(config.InproxyPersonalPairingBrokerSpecs) > 0 { + applyParameters[parameters.InproxyPersonalPairingBrokerSpecs] = config.InproxyPersonalPairingBrokerSpecs + } + if len(config.InproxyProxyBrokerSpecs) > 0 { applyParameters[parameters.InproxyProxyBrokerSpecs] = config.InproxyProxyBrokerSpecs } + if len(config.InproxyProxyPersonalPairingBrokerSpecs) > 0 { + applyParameters[parameters.InproxyProxyPersonalPairingBrokerSpecs] = config.InproxyProxyPersonalPairingBrokerSpecs + } + if len(config.InproxyClientBrokerSpecs) > 0 { applyParameters[parameters.InproxyClientBrokerSpecs] = config.InproxyClientBrokerSpecs } + if len(config.InproxyClientPersonalPairingBrokerSpecs) > 0 { + applyParameters[parameters.InproxyClientPersonalPairingBrokerSpecs] = config.InproxyClientPersonalPairingBrokerSpecs + } + if config.InproxyReplayBrokerDialParametersTTLSeconds != nil { applyParameters[parameters.InproxyReplayBrokerDialParametersTTL] = fmt.Sprintf("%ds", *config.InproxyReplayBrokerDialParametersTTLSeconds) } @@ -2575,6 +2626,14 @@ func (config *Config) makeConfigParameters() map[string]interface{} { applyParameters[parameters.InproxyProxyTotalActivityNoticePeriod] = fmt.Sprintf("%dms", *config.InproxyProxyTotalActivityNoticePeriodMilliseconds) } + if config.InproxyClientDialRateLimitQuantity != nil { + applyParameters[parameters.InproxyClientDialRateLimitQuantity] = *config.InproxyClientDialRateLimitQuantity + } + + if config.InproxyClientDialRateLimitIntervalMilliseconds != nil { + applyParameters[parameters.InproxyClientDialRateLimitInterval] = fmt.Sprintf("%dms", *config.InproxyClientDialRateLimitIntervalMilliseconds) + } + // When adding new config dial parameters that may override tactics, also // update setDialParametersHash. 
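For illustration, the must-upgrade handling added in config.go combines two small pieces: an atomic.Value holding a failure-signal callback (initialized to a no-op and later replaced by the controller) and a CompareAndSwap guard so the notice is emitted at most once. The following condensed sketch uses hypothetical names and a print in place of the real notice.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// upgradeSignaler mirrors the pattern shown in the patch: a swappable failure
// callback plus a once-only notice guard. Names here are illustrative, not
// the actual psiphon types.
type upgradeSignaler struct {
	signalComponentFailure atomic.Value // holds func()
	mustUpgradePosted      int32
}

func newUpgradeSignaler() *upgradeSignaler {
	s := &upgradeSignaler{}
	// Store a no-op so Load never returns nil before a real signal is set.
	s.signalComponentFailure.Store(func() {})
	return s
}

func (s *upgradeSignaler) SetSignalComponentFailure(f func()) {
	s.signalComponentFailure.Store(f)
}

func (s *upgradeSignaler) OnMustUpgrade() {
	// Emit the notice at most once, even if multiple dials report
	// must-upgrade concurrently.
	if atomic.CompareAndSwapInt32(&s.mustUpgradePosted, 0, 1) {
		fmt.Println("notice: InproxyMustUpgrade")
	}
	// Always signal the component failure, triggering a shutdown.
	s.signalComponentFailure.Load().(func())()
}

func main() {
	s := newUpgradeSignaler()
	s.SetSignalComponentFailure(func() { fmt.Println("shutting down") })
	s.OnMustUpgrade()
	s.OnMustUpgrade() // notice printed only once; shutdown signaled both times
}
```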
@@ -3183,14 +3242,26 @@ func (config *Config) setDialParametersHash() { hash.Write([]byte("InproxyBrokerSpecs")) hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyBrokerSpecs))) } + if len(config.InproxyPersonalPairingBrokerSpecs) > 0 { + hash.Write([]byte("InproxyPersonalPairingBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyPersonalPairingBrokerSpecs))) + } if len(config.InproxyProxyBrokerSpecs) > 0 { hash.Write([]byte("InproxyProxyBrokerSpecs")) hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxyBrokerSpecs))) } + if len(config.InproxyProxyPersonalPairingBrokerSpecs) > 0 { + hash.Write([]byte("InproxyProxyPersonalPairingBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyProxyPersonalPairingBrokerSpecs))) + } if len(config.InproxyClientBrokerSpecs) > 0 { hash.Write([]byte("InproxyClientBrokerSpecs")) hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyClientBrokerSpecs))) } + if len(config.InproxyClientPersonalPairingBrokerSpecs) > 0 { + hash.Write([]byte("InproxyClientPersonalPairingBrokerSpecs")) + hash.Write([]byte(fmt.Sprintf("%+v", config.InproxyClientPersonalPairingBrokerSpecs))) + } if config.InproxyReplayBrokerDialParametersTTLSeconds != nil { hash.Write([]byte("InproxyReplayBrokerDialParametersTTLSeconds")) binary.Write(hash, binary.LittleEndian, int64(*config.InproxyReplayBrokerDialParametersTTLSeconds)) diff --git a/psiphon/controller.go b/psiphon/controller.go index 9d8997cd1..733f04952 100755 --- a/psiphon/controller.go +++ b/psiphon/controller.go @@ -44,6 +44,7 @@ import ( "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun" lrucache "github.com/cognusion/go-cache-lru" + "golang.org/x/time/rate" ) // Controller is a tunnel lifecycle coordinator. It manages lists of servers to @@ -96,6 +97,8 @@ type Controller struct { inproxyNATStateManager *InproxyNATStateManager inproxyHandleTacticsMutex sync.Mutex inproxyLastStoredTactics time.Time + establishSignalForceTacticsFetch chan struct{} + inproxyClientDialRateLimiter *rate.Limiter } // NewController initializes a new controller. @@ -116,7 +119,7 @@ func NewController(config *Config) (controller *Controller, err error) { // ensures no tactics request is attempted now. 
doneContext, cancelFunc := context.WithCancel(context.Background()) cancelFunc() - GetTactics(doneContext, config) + GetTactics(doneContext, config, true) p := config.GetParameters().Get() splitTunnelClassificationTTL := @@ -257,6 +260,7 @@ func NewController(config *Config) (controller *Controller, err error) { } controller.config.SetTacticsAppliedReceivers(tacticAppliedReceivers) + controller.config.SetSignalComponentFailure(controller.SignalComponentFailure) return controller, nil } @@ -1505,6 +1509,8 @@ type protocolSelectionConstraints struct { limitTunnelDialPortNumbers protocol.TunnelProtocolPortLists limitQUICVersions protocol.QUICVersions replayCandidateCount int + isInproxyPersonalPairingMode bool + inproxyClientDialRateLimiter *rate.Limiter } func (p *protocolSelectionConstraints) hasInitialProtocols() bool { @@ -1556,24 +1562,30 @@ func (p *protocolSelectionConstraints) canReplay( replayProtocol) } -func (p *protocolSelectionConstraints) supportedProtocols( - connectTunnelCount int, - excludeIntensive bool, - excludeInproxy bool, - serverEntry *protocol.ServerEntry) []string { +func (p *protocolSelectionConstraints) getLimitTunnelProtocols( + connectTunnelCount int) protocol.TunnelProtocols { - limitTunnelProtocols := p.limitTunnelProtocols + protocols := p.limitTunnelProtocols if len(p.initialLimitTunnelProtocols) > 0 && p.initialLimitTunnelProtocolsCandidateCount > connectTunnelCount { - limitTunnelProtocols = p.initialLimitTunnelProtocols + protocols = p.initialLimitTunnelProtocols } + return protocols +} + +func (p *protocolSelectionConstraints) supportedProtocols( + connectTunnelCount int, + excludeIntensive bool, + excludeInproxy bool, + serverEntry *protocol.ServerEntry) []string { + return serverEntry.GetSupportedProtocols( conditionallyEnabledComponents{}, p.useUpstreamProxy, - limitTunnelProtocols, + p.getLimitTunnelProtocols(connectTunnelCount), p.limitTunnelDialPortNumbers, p.limitQUICVersions, excludeIntensive, @@ -1584,13 +1596,13 @@ func (p *protocolSelectionConstraints) selectProtocol( connectTunnelCount int, excludeIntensive bool, excludeInproxy bool, - serverEntry *protocol.ServerEntry) (string, bool) { + serverEntry *protocol.ServerEntry) (string, time.Duration, bool) { candidateProtocols := p.supportedProtocols( connectTunnelCount, excludeIntensive, excludeInproxy, serverEntry) if len(candidateProtocols) == 0 { - return "", false + return "", 0, false } // Pick at random from the supported protocols. This ensures that we'll @@ -1599,9 +1611,62 @@ func (p *protocolSelectionConstraints) selectProtocol( // through multi-capability servers, and a simpler ranked preference of // protocols could lead to that protocol never being selected. - index := prng.Intn(len(candidateProtocols)) + selectedProtocol := candidateProtocols[prng.Intn(len(candidateProtocols))] + + if !protocol.TunnelProtocolUsesInproxy(selectedProtocol) || + p.inproxyClientDialRateLimiter == nil { + + return selectedProtocol, 0, true + } + + // Rate limit in-proxy dials. This avoids triggering rate limits or + // similar errors from any intermediate CDN between the client and the + // broker. And avoids unnecessarily triggering the broker's + // application-level rate limiter, which will incur some overhead logging + // an event and returning a response. + // + // In personal pairing mode, or when protocol limits yield only in-proxy + // tunnel protocol candidates, no non-in-proxy protocol can be selected, + // so delay the dial. 
In other cases, skip the candidate and pick a + // non-in-proxy tunnel protocol. + // + // The delay is not applied here since the caller is holding the + // concurrentEstablishTunnelsMutex lock, potentially blocking other + // establishment workers. Instead the delay is returned and applied + // outside of the lock. This also allows for the delay to be reduced when + // the StaggerConnectionWorkers facility is active. + + if p.isInproxyPersonalPairingMode || + p.getLimitTunnelProtocols(connectTunnelCount).IsOnlyInproxyTunnelProtocols() { - return candidateProtocols[index], true + r := p.inproxyClientDialRateLimiter.Reserve() + if !r.OK() { + NoticeInfo("in-proxy protocol selection rate limited: burst size exceeded") + return "", 0, false + } + delay := r.Delay() + if delay > 0 { + NoticeInfo("in-proxy protocol selection rate limited: %v", delay) + } + return selectedProtocol, delay, true + + } else if !p.inproxyClientDialRateLimiter.Allow() { + + NoticeInfo("in-proxy protocol selection skipped due to rate limit") + + excludeInproxy = true + + candidateProtocols = p.supportedProtocols( + connectTunnelCount, excludeIntensive, excludeInproxy, serverEntry) + + if len(candidateProtocols) == 0 { + return "", 0, false + } + + return candidateProtocols[prng.Intn(len(candidateProtocols))], 0, true + } + + return selectedProtocol, 0, true } type candidateServerEntry struct { @@ -1667,6 +1732,30 @@ func (controller *Controller) startEstablishing() { // controller.serverAffinityDoneBroadcast. controller.serverAffinityDoneBroadcast = make(chan struct{}) + // TODO: Add a buffer of 1 so we don't miss a signal while worker is + // starting? Trade-off is potential back-to-back fetches. As-is, + // establish will eventually signal another fetch. + controller.establishSignalForceTacticsFetch = make(chan struct{}) + + // Initialize the in-proxy client dial rate limiter. Rate limits are used in + // protocolSelectionConstraints.selectProtocol. When + // InproxyClientDialRateLimitQuantity is 0, there is no rate limit. + // + // The rate limiter is reset for each establishment, which ensures no + // delays carry over from a previous establishment run. However, this + // does mean that very frequent re-establishments may exceed the rate + // limit overall. + + p := controller.config.GetParameters().Get() + inproxyRateLimitQuantity := p.Int(parameters.InproxyClientDialRateLimitQuantity) + inproxyRateLimitInterval := p.Duration(parameters.InproxyClientDialRateLimitInterval) + if inproxyRateLimitQuantity > 0 { + controller.inproxyClientDialRateLimiter = rate.NewLimiter( + rate.Limit(float64(inproxyRateLimitQuantity)/inproxyRateLimitInterval.Seconds()), + inproxyRateLimitQuantity) + } + p.Close() + controller.establishWaitGroup.Add(1) go controller.launchEstablishing() } @@ -1675,8 +1764,9 @@ func (controller *Controller) launchEstablishing() { defer controller.establishWaitGroup.Done() - // Before starting the establish tunnel workers, get and apply - // tactics, launching a tactics request if required. + // Before starting the establish tunnel workers, get and apply tactics, + // launching a tactics request if required -- when there are no tactics, + // or the cached tactics have expired. // // Wait only TacticsWaitPeriod for the tactics request to complete (or // fail) before proceeding with tunnel establishment, in case the tactics @@ -1691,25 +1781,75 @@ func (controller *Controller) launchEstablishing() { // // Any in-flight tactics request or pending retry will be // canceled when establishment is stopped. 
+ // + // In some cases, no tunnel establishment can succeed without a fresh + // tactics fetch, even if there is existing, non-expired cached tactics. + // Currently, cases include in-proxy personal pairing mode and limiting + // tunnel protocols to in-proxy, where broker specs are both required and + // obtained exclusively from tactics. It is possible that cached tactics + // are found and used, but broker configurations have recently changed + // away from the broker specs in cached tactics. + // + // Another scenario, with exclusively in-proxy tunnel protocols, is a + // fresh start with no embedded server entries, where the initial + // GetTactics will fail with "no capable servers". + // + // To handle these cases, when cached tactics are used or no tactics can + // be fetched, the tactics worker goroutine will remain running and await + // a signal to force a tactics fetch that ignores any stored/cached + // tactics. Multiple signals and fetch attempts are supported, to retry + // when a GetTactics fetch iteration fails, including the "no capable + // servers" case, which may only succeed after a concurrent server list + // fetch completes. + // + // Limitation: this mechanism doesn't force repeated tactics fetches after + // one success, which risks being excessive. There's at most one + // successful fetch per establishment run. As such, it remains remotely + // possible that a tactics change, such as new broker specs, deployed in + // the middle of an establishment run, won't be fetched. A user-initiated + // stop/start toggle will work around this. if !controller.config.DisableTactics { timeout := controller.config.GetParameters().Get().Duration( parameters.TacticsWaitPeriod) - tacticsDone := make(chan struct{}) + initialTacticsDone := make(chan struct{}) tacticsWaitPeriod := time.NewTimer(timeout) defer tacticsWaitPeriod.Stop() controller.establishWaitGroup.Add(1) go func() { defer controller.establishWaitGroup.Done() - defer close(tacticsDone) - GetTactics(controller.establishCtx, controller.config) + + useStoredTactics := true + fetched := GetTactics( + controller.establishCtx, controller.config, useStoredTactics) + close(initialTacticsDone) + + if fetched { + return + } + + for { + select { + case <-controller.establishCtx.Done(): + return + case <-controller.establishSignalForceTacticsFetch: + } + + useStoredTactics = false + fetched = GetTactics( + controller.establishCtx, controller.config, useStoredTactics) + if fetched { + // No more forced tactics fetches after the first success. + break + } + } }() select { - case <-tacticsDone: + case <-initialTacticsDone: case <-tacticsWaitPeriod.C: } @@ -1741,6 +1881,9 @@ func (controller *Controller) launchEstablishing() { p.TunnelProtocolPortLists(parameters.LimitTunnelDialPortNumbers)), replayCandidateCount: p.Int(parameters.ReplayCandidateCount), + + isInproxyPersonalPairingMode: controller.config.IsInproxyPersonalPairingMode(), + inproxyClientDialRateLimiter: controller.inproxyClientDialRateLimiter, } // Adjust protocol limits for in-proxy personal proxy mode. In this mode, @@ -1748,18 +1891,18 @@ func (controller *Controller) launchEstablishing() { // corresponding personal compartment ID, so non-in-proxy tunnel // protocols are disabled. 
- if len(controller.config.InproxyClientPersonalCompartmentIDs) > 0 { + if controller.config.IsInproxyPersonalPairingMode() { if len(controller.protocolSelectionConstraints.initialLimitTunnelProtocols) > 0 { controller.protocolSelectionConstraints.initialLimitTunnelProtocols = controller.protocolSelectionConstraints. - initialLimitTunnelProtocols.OnlyInproxyTunnelProtocols() + initialLimitTunnelProtocols.PruneNonInproxyTunnelProtocols() } if len(controller.protocolSelectionConstraints.limitTunnelProtocols) > 0 { controller.protocolSelectionConstraints.limitTunnelProtocols = controller.protocolSelectionConstraints. - limitTunnelProtocols.OnlyInproxyTunnelProtocols() + limitTunnelProtocols.PruneNonInproxyTunnelProtocols() } // This covers two cases: if there was no limitTunnelProtocols to @@ -1773,8 +1916,17 @@ func (controller *Controller) launchEstablishing() { } // ConnectionWorkerPoolSize may be set by tactics. + // + // In-proxy personal pairing mode uses a distinct parameter which is + // typically configured to a lower number, limiting concurrent load and + // announcement consumption for personal proxies. - workerPoolSize := p.Int(parameters.ConnectionWorkerPoolSize) + var workerPoolSize int + if controller.config.IsInproxyPersonalPairingMode() { + workerPoolSize = p.Int(parameters.InproxyPersonalPairingConnectionWorkerPoolSize) + } else { + workerPoolSize = p.Int(parameters.ConnectionWorkerPoolSize) + } // When TargetServerEntry is used, override any worker pool size config or // tactic parameter and use a pool size of 1. The typical use case for @@ -1950,6 +2102,8 @@ func (controller *Controller) stopEstablishing() { controller.establishWaitGroup = nil controller.candidateServerEntries = nil controller.serverAffinityDoneBroadcast = nil + controller.establishSignalForceTacticsFetch = nil + controller.inproxyClientDialRateLimiter = nil controller.concurrentEstablishTunnelsMutex.Lock() peakConcurrent := controller.peakConcurrentEstablishTunnels @@ -2141,11 +2295,56 @@ loop: // No fetches are triggered when TargetServerEntry is specified. In that // case, we're only trying to connect to a specific server entry. - if (candidateServerEntryCount == 0 || - time.Since(controller.establishStartTime)-totalNetworkWaitDuration > workTime) && - controller.config.TargetServerEntry == "" { + if candidateServerEntryCount == 0 || + time.Since(controller.establishStartTime)-totalNetworkWaitDuration > workTime { + + if controller.config.TargetServerEntry == "" { + controller.triggerFetches() + } + + // Trigger a forced tactics fetch. Currently, this is done only + // for cases where in-proxy tunnel protocols must be selected. + // When there were no server entries, wait until a server entry + // fetch has completed. - controller.triggerFetches() + // Lock required to access controller.establishConnectTunnelCount. + controller.concurrentEstablishTunnelsMutex.Lock() + limitInproxyOnly := controller.protocolSelectionConstraints.getLimitTunnelProtocols( + controller.establishConnectTunnelCount).IsOnlyInproxyTunnelProtocols() + controller.concurrentEstablishTunnelsMutex.Unlock() + + if limitInproxyOnly || controller.config.IsInproxyPersonalPairingMode() { + + // Simply sleep and poll for any imported server entries; + // perform one sleep after HasServerEntries, in order to give + // the import some extra time. Limitation: if the sleep loop + // ends too soon, the tactics fetch won't find a + // tactics-capable server entry; in this case, workTime must + // elapse before another tactics fetch is triggered. 
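For illustration, the forced tactics fetch signal described above is a plain channel: the establish loop sends with a select/default so it never blocks, and the tactics worker keeps re-fetching, ignoring stored tactics, until one fetch succeeds. A stripped-down sketch of that handshake, with hypothetical names and a stubbed fetch, follows.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// fetchTactics is a stand-in for a forced (non-cached) tactics fetch; it
// pretends the first attempt fails and a later attempt succeeds.
func fetchTactics(attempt int) bool {
	return attempt >= 2
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	forceFetch := make(chan struct{})
	var wg sync.WaitGroup

	// Tactics worker: waits for force-fetch signals and stops after the
	// first successful fetch, mirroring the establishment tactics worker.
	wg.Add(1)
	go func() {
		defer wg.Done()
		attempt := 0
		for {
			select {
			case <-ctx.Done():
				return
			case <-forceFetch:
			}
			attempt++
			if fetchTactics(attempt) {
				fmt.Println("forced tactics fetch succeeded")
				return
			}
			fmt.Println("forced tactics fetch failed; awaiting next signal")
		}
	}()

	// Establish loop: signal without blocking; if the worker is mid-fetch
	// or has exited, the signal is simply dropped and retried on a later pass.
	for i := 0; i < 3; i++ {
		select {
		case forceFetch <- struct{}{}:
		default:
		}
		time.Sleep(50 * time.Millisecond)
	}

	cancel()
	wg.Wait()
}
```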
+ // + // TODO: synchronize with server list fetch/import complete; + // or use ScanServerEntries (but see function comment about + // performance concern) to check for at least one + // tactics-capable server entry. + + if candidateServerEntryCount == 0 { + stopWaiting := false + for { + if HasServerEntries() { + stopWaiting = true + } + common.SleepWithContext(controller.establishCtx, 1*time.Second) + if stopWaiting || controller.establishCtx.Err() != nil { + break + } + } + } + + select { + case controller.establishSignalForceTacticsFetch <- struct{}{}: + default: + } + } } // After a complete iteration of candidate servers, pause before iterating again. @@ -2297,20 +2496,32 @@ loop: replayProtocol) } + // The dial rate limit delay, determined by protocolSelectionConstraints.selectProtocol, is + // not applied within that function since this worker holds the concurrentEstablishTunnelsMutex + // lock when that's called. Instead, the required delay is passed out and applied below. + // It's safe for the selectProtocol callback to write to dialRateLimitDelay without + // synchronization since this worker goroutine invokes the callback. + + var dialRateLimitDelay time.Duration + selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) { // The in-proxy protocol selection probability allows for // tuning/limiting in-proxy usage independent of // LimitTunnelProtocol targeting. - onlyInproxy := len(controller.config.InproxyClientPersonalCompartmentIDs) > 0 + onlyInproxy := controller.config.IsInproxyPersonalPairingMode() includeInproxy := onlyInproxy || prng.FlipWeightedCoin(inproxySelectionProbability) - return controller.protocolSelectionConstraints.selectProtocol( + selectedProtocol, rateLimitDelay, ok := controller.protocolSelectionConstraints.selectProtocol( controller.establishConnectTunnelCount, excludeIntensive, !includeInproxy, serverEntry) + + dialRateLimitDelay = rateLimitDelay + + return selectedProtocol, ok } // MakeDialParameters may return a replay instance, if the server @@ -2390,6 +2601,8 @@ loop: controller.concurrentEstablishTunnelsMutex.Unlock() + startStagger := time.Now() + // Apply stagger only now that we're past MakeDialParameters and // protocol selection logic which may have caused the candidate to be // skipped. The stagger logic delays dialing, and we don't want to @@ -2412,6 +2625,15 @@ loop: controller.staggerMutex.Unlock() } + // Apply any dial rate limit delay now, after unlocking + // concurrentEstablishTunnelsMutex. The delay may be reduced by the + // time spent waiting to stagger. + + dialRateLimitDelay -= time.Since(startStagger) + if dialRateLimitDelay > 0 { + common.SleepWithContext(controller.establishCtx, dialRateLimitDelay) + } + // ConnectTunnel will allocate significant memory, so first attempt to // reclaim as much as possible. DoGarbageCollection() @@ -2510,7 +2732,7 @@ func (controller *Controller) runInproxyProxy() { // When not running client tunnel establishment, perform an OOB tactics // fetch, if required, here. 
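For illustration, golang.org/x/time/rate supports exactly this reserve-now, sleep-later usage: Reserve consumes a token immediately and reports how long to wait, and the worker applies that wait only after releasing the shared mutex (less any stagger time already spent). A minimal sketch with arbitrary quantity/interval values:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Allow 2 dials per second with a burst of 2, roughly in the
	// quantity/interval style of the new rate limit knobs (values here are
	// arbitrary).
	quantity := 2
	interval := time.Second
	limiter := rate.NewLimiter(
		rate.Limit(float64(quantity)/interval.Seconds()), quantity)

	ctx := context.Background()

	for i := 0; i < 4; i++ {
		// Reserve while still under the shared lock (not shown): this
		// consumes a token and tells us how long to wait, without blocking.
		r := limiter.Reserve()
		if !r.OK() {
			fmt.Println("burst size exceeded; skip candidate")
			continue
		}
		delay := r.Delay()

		// ... release the lock, possibly wait out a stagger period ...

		// Apply the remaining delay outside the lock.
		if delay > 0 {
			fmt.Printf("dial %d delayed by %v\n", i, delay)
			timer := time.NewTimer(delay)
			select {
			case <-timer.C:
			case <-ctx.Done():
				timer.Stop()
				return
			}
		}
		fmt.Printf("dial %d proceeds\n", i)
	}
}
```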
- GetTactics(controller.runCtx, controller.config) + GetTactics(controller.runCtx, controller.config, true) } else if !controller.config.InproxySkipAwaitFullyConnected { @@ -2654,12 +2876,8 @@ func (controller *Controller) runInproxyProxy() { MaxClients: controller.config.InproxyMaxClients, LimitUpstreamBytesPerSecond: controller.config.InproxyLimitUpstreamBytesPerSecond, LimitDownstreamBytesPerSecond: controller.config.InproxyLimitDownstreamBytesPerSecond, - - OperatorMessageHandler: func(messageJSON string) { - NoticeInproxyOperatorMessage(messageJSON) - }, - - ActivityUpdater: activityUpdater, + MustUpgrade: controller.config.OnInproxyMustUpgrade, + ActivityUpdater: activityUpdater, } proxy, err := inproxy.NewProxy(config) @@ -2849,8 +3067,7 @@ func (controller *Controller) inproxyHandleProxyTacticsPayload( return false } - if tacticsRecord != nil && - prng.FlipWeightedCoin(tacticsRecord.Tactics.Probability) { + if tacticsRecord != nil { // SetParameters signals registered components, including broker // client and NAT state managers, that must reset upon tactics changes. diff --git a/psiphon/controller_test.go b/psiphon/controller_test.go index bd9e8c916..ccdb4c05a 100644 --- a/psiphon/controller_test.go +++ b/psiphon/controller_test.go @@ -39,6 +39,7 @@ import ( socks "github.com/Psiphon-Labs/goptlib" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common" + "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/inproxy" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol" "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/quic" "github.com/elazarl/goproxy" @@ -300,29 +301,37 @@ func TestFrontedQUIC(t *testing.T) { func TestInproxyOSSH(t *testing.T) { - t.Skipf("temporarily disabled") + if !inproxy.Enabled() { + t.Skip("In-proxy is not enabled") + } controllerRun(t, &controllerRunConfig{ protocol: "INPROXY-WEBRTC-OSSH", disableUntunneledUpgrade: true, + useInproxyDialRateLimit: true, }) } func TestInproxyQUICOSSH(t *testing.T) { - t.Skipf("temporarily disabled") + if !inproxy.Enabled() { + t.Skip("In-proxy is not enabled") + } controllerRun(t, &controllerRunConfig{ protocol: "INPROXY-WEBRTC-QUIC-OSSH", disableUntunneledUpgrade: true, + useInproxyDialRateLimit: true, }) } func TestInproxyUnfrontedMeekHTTPS(t *testing.T) { - t.Skipf("temporarily disabled") + if !inproxy.Enabled() { + t.Skip("In-proxy is not enabled") + } controllerRun(t, &controllerRunConfig{ @@ -333,7 +342,9 @@ func TestInproxyUnfrontedMeekHTTPS(t *testing.T) { func TestInproxyTLSOSSH(t *testing.T) { - t.Skipf("temporarily disabled") + if !inproxy.Enabled() { + t.Skip("In-proxy is not enabled") + } controllerRun(t, &controllerRunConfig{ @@ -372,6 +383,7 @@ type controllerRunConfig struct { transformHostNames bool useFragmentor bool useLegacyAPIEncoding bool + useInproxyDialRateLimit bool } func controllerRun(t *testing.T, runConfig *controllerRunConfig) { @@ -439,6 +451,11 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) { modifyConfig["TargetAPIEncoding"] = protocol.PSIPHON_API_ENCODING_JSON } + if runConfig.useInproxyDialRateLimit { + modifyConfig["InproxyClientDialRateLimitQuantity"] = 2 + modifyConfig["InproxyClientDialRateLimitIntervalMilliseconds"] = 1000 + } + configJSON, _ = json.Marshal(modifyConfig) config, err := LoadConfig(configJSON) diff --git a/psiphon/dataStore.go b/psiphon/dataStore.go index 60a2678c2..cdc3350d7 100644 --- a/psiphon/dataStore.go +++ b/psiphon/dataStore.go @@ -1302,7 +1302,7 @@ func deleteServerEntryHelper( // // ScanServerEntries 
may be slow to execute, particularly for older devices -and/or very large server lists. Callers should avoid blocking on -ScanServerEntries where possible; and use the canel option to interrupt +ScanServerEntries where possible; and use the cancel option to interrupt scans that are no longer required. func ScanServerEntries(callback func(*protocol.ServerEntry) bool) error { diff --git a/psiphon/debug.go b/psiphon/debug.go new file mode 100644 index 000000000..d2f227208 --- /dev/null +++ b/psiphon/debug.go @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2024, Psiphon Inc. + * All rights reserved. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +package psiphon + +import ( + "sync/atomic" +) + +var allowOverlappingPersonalCompartmentIDs int32 + +func GetAllowOverlappingPersonalCompartmentIDs() bool { + return atomic.LoadInt32(&allowOverlappingPersonalCompartmentIDs) == 1 +} + +// SetAllowOverlappingPersonalCompartmentIDs configures whether to allow +// overlapping personal compartment IDs in InproxyProxyPersonalCompartmentIDs +// and InproxyClientPersonalCompartmentIDs. Overlapping IDs are not allowed +// in order to prevent a client matching its own proxy. +// SetAllowOverlappingPersonalCompartmentIDs is for end-to-end testing on a +// single host, and should be used only for testing purposes. +func SetAllowOverlappingPersonalCompartmentIDs(allow bool) { + value := int32(0) + if allow { + value = 1 + } + atomic.StoreInt32(&allowOverlappingPersonalCompartmentIDs, value) +} + +var allowBogonWebRTCConnections int32 + +func GetAllowBogonWebRTCConnections() bool { + return atomic.LoadInt32(&allowBogonWebRTCConnections) == 1 +} + +// SetAllowBogonWebRTCConnections configures whether to allow bogon ICE +// candidates in WebRTC session descriptions. This includes loopback and +// private network candidates. By default, bogon addresses are excluded as +// they are not expected to be useful and may expose private network +// information. SetAllowBogonWebRTCConnections is for end-to-end testing on a +// single host, and should be used only for testing purposes.
+func SetAllowBogonWebRTCConnections(allow bool) { + value := int32(0) + if allow { + value = 1 + } + atomic.StoreInt32(&allowBogonWebRTCConnections, value) +} diff --git a/psiphon/dialParameters_test.go b/psiphon/dialParameters_test.go index 05df430e1..f4bf5e6a2 100644 --- a/psiphon/dialParameters_test.go +++ b/psiphon/dialParameters_test.go @@ -851,7 +851,8 @@ func TestLimitTunnelDialPortNumbers(t *testing.T) { } selectProtocol := func(serverEntry *protocol.ServerEntry) (string, bool) { - return constraints.selectProtocol(0, false, false, serverEntry) + protocol, _, ok := constraints.selectProtocol(0, false, false, serverEntry) + return protocol, ok } for _, tunnelProtocol := range protocol.SupportedTunnelProtocols { diff --git a/psiphon/feedback.go b/psiphon/feedback.go index 202c94afd..8e927ae3d 100644 --- a/psiphon/feedback.go +++ b/psiphon/feedback.go @@ -142,7 +142,7 @@ func SendFeedback(ctx context.Context, config *Config, diagnostics, uploadPath s // or a network ID of "VPN" if some other non-Psiphon VPN is running // (the caller should ensure a network ID of "VPN" in this case). - GetTactics(getTacticsCtx, config) + GetTactics(getTacticsCtx, config, true) // Get the latest client parameters p = config.GetParameters().Get() diff --git a/psiphon/inproxy.go b/psiphon/inproxy.go index 5455d4fc6..4c9dc2ad0 100644 --- a/psiphon/inproxy.go +++ b/psiphon/inproxy.go @@ -247,16 +247,39 @@ func NewInproxyBrokerClientInstance( return nil, errors.Trace(err) } - // Select the broker to use, optionally favoring brokers with replay - // data. + // Select the broker to use, optionally favoring brokers with replay data. + // In the InproxyBrokerSpecs calls, the first non-empty tactics parameter + // list is used. + // + // Optional broker specs may be used to specify broker(s) dedicated to + // personal pairing, a configuration which can be used to reserve more + // capacity for personal pairing, given the simple rendezvous scheme below. var brokerSpecs parameters.InproxyBrokerSpecsValue if isProxy { - brokerSpecs = p.InproxyBrokerSpecs( - parameters.InproxyProxyBrokerSpecs, parameters.InproxyBrokerSpecs) + if config.IsInproxyPersonalPairingMode() { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyProxyPersonalPairingBrokerSpecs, + parameters.InproxyPersonalPairingBrokerSpecs, + parameters.InproxyProxyBrokerSpecs, + parameters.InproxyBrokerSpecs) + } else { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyProxyBrokerSpecs, + parameters.InproxyBrokerSpecs) + } } else { - brokerSpecs = p.InproxyBrokerSpecs( - parameters.InproxyClientBrokerSpecs, parameters.InproxyBrokerSpecs) + if config.IsInproxyPersonalPairingMode() { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyClientPersonalPairingBrokerSpecs, + parameters.InproxyPersonalPairingBrokerSpecs, + parameters.InproxyClientBrokerSpecs, + parameters.InproxyBrokerSpecs) + } else { + brokerSpecs = p.InproxyBrokerSpecs( + parameters.InproxyClientBrokerSpecs, + parameters.InproxyBrokerSpecs) + } } if len(brokerSpecs) == 0 { return nil, errors.TraceNew("no broker specs") @@ -2076,7 +2099,8 @@ func (s *InproxyNATStateManager) TacticsApplied() error { func (s *InproxyNATStateManager) reset() { - // Assumes s.mutex lock is held. 
+ s.mutex.Lock() + defer s.mutex.Unlock() networkID := s.config.GetNetworkID() diff --git a/psiphon/notice.go b/psiphon/notice.go index 4ed7d664a..3c59e1274 100644 --- a/psiphon/notice.go +++ b/psiphon/notice.go @@ -1097,12 +1097,16 @@ func NoticeSkipServerEntry(format string, args ...interface{}) { "SkipServerEntry", 0, "reason", reason) } -// NoticeInproxyOperatorMessage emits a message to be displayed to the proxy -// operator. -func NoticeInproxyOperatorMessage(messageJSON string) { +// NoticeInproxyMustUpgrade reports that an in-proxy component requires an app +// upgrade. Currently this includes running a proxy; and running a client in +// personal pairing mode. The receiver should alert the user to upgrade the +// app. +// +// There is at most one InproxyMustUpgrade notice emitted per controller run, +// and an InproxyMustUpgrade notice is followed by a tunnel-core shutdown. +func NoticeInproxyMustUpgrade() { singletonNoticeLogger.outputNotice( - "InproxyOperatorMessage", 0, - "message", messageJSON) + "InproxyMustUpgrade", 0) } // NoticeInproxyProxyActivity reports proxy usage statistics. The stats are diff --git a/psiphon/server/config.go b/psiphon/server/config.go index af482da91..96aa71f79 100644 --- a/psiphon/server/config.go +++ b/psiphon/server/config.go @@ -1129,8 +1129,7 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, []byt RequestPrivateKey: decodedTacticsRequestPrivateKey, RequestObfuscatedKey: decodedTacticsRequestObfuscatedKey, DefaultTactics: tactics.Tactics{ - TTL: "1m", - Probability: 1.0, + TTL: "1m", }, } diff --git a/psiphon/server/server_test.go b/psiphon/server/server_test.go index 3985a0df0..5ef5e8b48 100644 --- a/psiphon/server/server_test.go +++ b/psiphon/server/server_test.go @@ -416,6 +416,23 @@ func TestInproxyTLSOSSH(t *testing.T) { }) } +func TestInproxyPersonalPairing(t *testing.T) { + if !inproxy.Enabled() { + t.Skip("inproxy is not enabled") + } + runServer(t, + &runServerConfig{ + tunnelProtocol: "INPROXY-WEBRTC-OSSH", + requireAuthorization: true, + doTunneledWebRequest: true, + doTunneledNTPRequest: true, + doDanglingTCPConn: true, + doLogHostProvider: true, + doTargetBrokerSpecs: true, + doPersonalPairing: true, + }) +} + func TestHotReload(t *testing.T) { runServer(t, &runServerConfig{ @@ -654,6 +671,7 @@ type runServerConfig struct { doSteeringIP bool doTargetBrokerSpecs bool useLegacyAPIEncoding bool + doPersonalPairing bool } var ( @@ -1301,6 +1319,15 @@ func runServer(t *testing.T, runConfig *runServerConfig) { clientConfig.InproxyLimitDownstreamBytesPerSecond = 0 clientConfig.ServerEntrySignaturePublicKey = inproxyTestConfig.brokerServerEntrySignaturePublicKey + if runConfig.doPersonalPairing { + + psiphon.SetAllowOverlappingPersonalCompartmentIDs(true) + defer psiphon.SetAllowOverlappingPersonalCompartmentIDs(false) + + clientConfig.InproxyClientPersonalCompartmentIDs = []string{inproxyTestConfig.personalCompartmentID} + clientConfig.InproxyProxyPersonalCompartmentIDs = []string{inproxyTestConfig.personalCompartmentID} + } + // Simulate a CDN adding required HTTP headers by injecting them at // the client. 
headers := make(http.Header) @@ -2385,6 +2412,10 @@ func checkExpectedServerTunnelLogFields( return fmt.Errorf("unexpected inproxy_proxy_id '%s'", fields["inproxy_proxy_id"]) } + if fields["inproxy_matched_common_compartments"].(bool) != !runConfig.doPersonalPairing { + return fmt.Errorf("unexpected inproxy_matched_common_compartments '%s'", fields["inproxy_matched_common_compartments"]) + } + if fields["inproxy_broker_fronting_provider_id"].(string) != inproxyTestConfig.brokerFrontingProviderID { return fmt.Errorf("unexpected inproxy_broker_fronting_provider_id '%s'", fields["inproxy_broker_fronting_provider_id"]) } @@ -3390,6 +3421,8 @@ type inproxyTestConfig struct { proxySessionPublicKey string proxySessionPublicKeyCurve25519 string proxySessionPrivateKey string + + personalCompartmentID string } func generateInproxyTestConfig( @@ -3404,8 +3437,6 @@ func generateInproxyTestConfig( // In this test, a single common compartment ID is issued to all clients; // the test client will get it via tactics. // - // TODO: exercise personal compartment IDs - // // Because of singletons in the Psiphon client, there can only be a single // Psiphon client instance in this test process, and so it must act as // it's own in-proxy proxy. @@ -3425,6 +3456,12 @@ func generateInproxyTestConfig( } commonCompartmentIDStr := commonCompartmentID.String() + personalCompartmentID, err := inproxy.MakeID() + if err != nil { + return nil, errors.Trace(err) + } + personalCompartmentIDStr := personalCompartmentID.String() + brokerSessionPrivateKey, err := inproxy.GenerateSessionPrivateKey() if err != nil { return nil, errors.Trace(err) @@ -3589,6 +3626,7 @@ func generateInproxyTestConfig( proxySessionPublicKey: proxySessionPublicKeyStr, proxySessionPublicKeyCurve25519: proxySessionPublicKeyCurve25519Str, proxySessionPrivateKey: proxySessionPrivateKeyStr, + personalCompartmentID: personalCompartmentIDStr, } return config, nil diff --git a/psiphon/serverApi.go b/psiphon/serverApi.go index 2654c5bf7..a3b409ac7 100644 --- a/psiphon/serverApi.go +++ b/psiphon/serverApi.go @@ -403,8 +403,7 @@ func (serverContext *ServerContext) doHandshakeRequest(ignoreStatsRegexps bool) return errors.Trace(err) } - if tacticsRecord != nil && - prng.FlipWeightedCoin(tacticsRecord.Tactics.Probability) { + if tacticsRecord != nil { err := serverContext.tunnel.config.SetParameters( tacticsRecord.Tag, true, tacticsRecord.Tactics.Parameters) diff --git a/psiphon/tactics.go b/psiphon/tactics.go index c7913287a..c1aa33d99 100755 --- a/psiphon/tactics.go +++ b/psiphon/tactics.go @@ -49,7 +49,15 @@ import ( // and without blocking the Controller from starting. Accessing tactics is // most critical for untunneled network operations; when a Controller is // running, a tunnel may be used. See TacticsStorer for more details. -func GetTactics(ctx context.Context, config *Config) { +// +// When the useStoredTactics input flag is false, any locally cached tactics +// are ignored, regardless of TTL, and a fetch is always performed. GetTactics +// returns true when a fetch was performed and false otherwise (either cached +// tactics were found and applied, or there was a failure). This combination +// of useStoredTactics input and fetchedTactics output is used by the +// caller to force a fetch if one was not already performed to handle states +// where no tunnels can be established due to missing tactics. 
+func GetTactics(ctx context.Context, config *Config, useStoredTactics bool) (fetchedTactics bool) { // Limitation: GetNetworkID may not account for device VPN status, so // Psiphon-over-Psiphon or Psiphon-over-other-VPN scenarios can encounter @@ -61,16 +69,21 @@ func GetTactics(ctx context.Context, config *Config) { // network ID remains the same. Initial applied tactics will be for the // remote egress region/ISP, not the local region/ISP. - tacticsRecord, err := tactics.UseStoredTactics( - GetTacticsStorer(config), - config.GetNetworkID()) - if err != nil { - NoticeWarning("get stored tactics failed: %s", err) + var tacticsRecord *tactics.Record + + if useStoredTactics { + var err error + tacticsRecord, err = tactics.UseStoredTactics( + GetTacticsStorer(config), + config.GetNetworkID()) + if err != nil { + NoticeWarning("get stored tactics failed: %s", err) - // The error will be due to a local datastore problem. - // While we could proceed with the tactics request, this - // could result in constant tactics requests. So, abort. - return + // The error will be due to a local datastore problem. + // While we could proceed with the tactics request, this + // could result in constant tactics requests. So, abort. + return + } } if tacticsRecord == nil { @@ -125,6 +138,13 @@ func GetTactics(ctx context.Context, config *Config) { if err == nil { if tacticsRecord != nil { + + // Set the return value indicating a successful fetch. + // Note that applying the tactics below may still fail, + // but this is not an expected case and we don't want the + // caller to continuously force refetches after this point. + fetchedTactics = true + // The fetch succeeded, so exit the fetch loop and apply // the result. break @@ -163,8 +183,7 @@ func GetTactics(ctx context.Context, config *Config) { } } - if tacticsRecord != nil && - prng.FlipWeightedCoin(tacticsRecord.Tactics.Probability) { + if tacticsRecord != nil { err := config.SetParameters( tacticsRecord.Tag, true, tacticsRecord.Tactics.Parameters) @@ -184,6 +203,8 @@ func GetTactics(ctx context.Context, config *Config) { // to be proceeding to the memory-intensive tunnel establishment phase. DoGarbageCollection() emitMemoryMetrics() + + return } // fetchTactics performs a tactics request using the specified server entry. diff --git a/psiphon/tactics_test.go b/psiphon/tactics_test.go index 0c8c14474..76cf2fdd9 100644 --- a/psiphon/tactics_test.go +++ b/psiphon/tactics_test.go @@ -117,7 +117,7 @@ func TestStandAloneGetTactics(t *testing.T) { // operations in GetTactics. 
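For illustration, the revised GetTactics contract reduces to a boolean in (may stored tactics be used?) and a boolean out (was a fetch actually performed?). A tiny, hypothetical caller-side sketch of that contract:

```go
package main

import "context"

// getTactics is a stand-in with the same shape as the revised GetTactics:
// useStoredTactics=true may be satisfied from cache (returning false, since
// no fetch happened); useStoredTactics=false always attempts a fetch.
func getTactics(ctx context.Context, useStoredTactics bool) (fetched bool) {
	if useStoredTactics {
		// Pretend valid cached tactics were found and applied.
		return false
	}
	// Pretend a network fetch was performed successfully.
	return true
}

// ensureFreshTactics sketches the caller-side contract: when the first call
// was satisfied from cache but the current mode needs up-to-date tactics
// (for example, fresh broker specs), force a second, non-cached fetch.
func ensureFreshTactics(ctx context.Context, needFresh bool) {
	fetched := getTactics(ctx, true)
	if !fetched && needFresh {
		_ = getTactics(ctx, false)
	}
}

func main() {
	ensureFreshTactics(context.Background(), true)
}
```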
CloseDataStore() - GetTactics(ctx, config) + GetTactics(ctx, config, true) if atomic.LoadInt32(&gotTactics) != 1 { t.Fatalf("failed to get tactics") diff --git a/psiphon/tunnel.go b/psiphon/tunnel.go index 1b404a87a..aba881eb0 100644 --- a/psiphon/tunnel.go +++ b/psiphon/tunnel.go @@ -1568,6 +1568,7 @@ func dialInproxy( DialAddress: dialAddress, RemoteAddrOverride: remoteAddrOverride, PackedDestinationServerEntry: dialParams.inproxyPackedSignedServerEntry, + MustUpgrade: config.OnInproxyMustUpgrade, } conn, err := inproxy.DialClient(ctx, clientConfig) diff --git a/vendor/github.com/gammazero/deque/.gitignore b/vendor/github.com/gammazero/deque/.gitignore deleted file mode 100644 index b33406fb0..000000000 --- a/vendor/github.com/gammazero/deque/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -*~ - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/gammazero/deque/LICENSE b/vendor/github.com/gammazero/deque/LICENSE deleted file mode 100644 index 0566f2661..000000000 --- a/vendor/github.com/gammazero/deque/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 Andrew J. Gillis - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/gammazero/deque/README.md b/vendor/github.com/gammazero/deque/README.md deleted file mode 100644 index eb06369eb..000000000 --- a/vendor/github.com/gammazero/deque/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# deque - -[![GoDoc](https://pkg.go.dev/badge/github.com/gammazero/deque)](https://pkg.go.dev/github.com/gammazero/deque) -[![Build Status](https://github.com/gammazero/deque/actions/workflows/go.yml/badge.svg)](https://github.com/gammazero/deque/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/gammazero/deque)](https://goreportcard.com/report/github.com/gammazero/deque) -[![codecov](https://codecov.io/gh/gammazero/deque/branch/master/graph/badge.svg)](https://codecov.io/gh/gammazero/deque) -[![License](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) - -Fast ring-buffer deque ([double-ended queue](https://en.wikipedia.org/wiki/Double-ended_queue)) implementation. 
- -For a pictorial description, see the [Deque diagram](https://github.com/gammazero/deque/wiki) - -## Installation - -``` -$ go get github.com/gammazero/deque -``` - -## Deque data structure - -Deque generalizes a queue and a stack, to efficiently add and remove items at either end with O(1) performance. [Queue](https://en.wikipedia.org/wiki/Queue_(abstract_data_type)) (FIFO) operations are supported using `PushBack` and `PopFront`. [Stack](https://en.wikipedia.org/wiki/Stack_(abstract_data_type)) (LIFO) operations are supported using `PushBack` and `PopBack`. - -## Ring-buffer Performance - -This deque implementation is optimized for CPU and GC performance. The circular buffer automatically re-sizes by powers of two, growing when additional capacity is needed and shrinking when only a quarter of the capacity is used, and uses bitwise arithmetic for all calculations. Since growth is by powers of two, adding elements will only cause O(log n) allocations. A minimum capacity can be set so that there is no resizing at or below that specified amount. - -The ring-buffer implementation improves memory and time performance with fewer GC pauses, compared to implementations based on slices and linked lists. By wrapping around the buffer, previously used space is reused, making allocation unnecessary until all buffer capacity is used. If the deque is only filled and then completely emptied before being filled again, then the ring structure offers little benefit for memory reuse over a slice. - -For maximum speed, this deque implementation leaves concurrency safety up to the application to provide, however the application chooses, if needed at all. - -## Reading Empty Deque - -Since it is OK for the deque to contain a `nil` value, it is necessary to either panic or return a second boolean value to indicate the deque is empty, when reading or removing an element. This deque panics when reading from an empty deque. This is a run-time check to help catch programming errors, which may be missed if a second return value is ignored. Simply check `Deque.Len()` before reading from the deque. - -## Generics - -Deque uses generics to create a Deque that contains items of the type specified. To create a Deque that holds a specific type, provide a type argument to New or with the variable declaration. For example: -```go - stringDeque := deque.New[string]() - var intDeque deque.Deque[int] -``` - -## Example - -```go -package main - -import ( - "fmt" - "github.com/gammazero/deque" -) - -func main() { - var q deque.Deque[string] - q.PushBack("foo") - q.PushBack("bar") - q.PushBack("baz") - - fmt.Println(q.Len()) // Prints: 3 - fmt.Println(q.Front()) // Prints: foo - fmt.Println(q.Back()) // Prints: baz - - q.PopFront() // remove "foo" - q.PopBack() // remove "baz" - - q.PushFront("hello") - q.PushBack("world") - - // Consume deque and print elements. - for q.Len() != 0 { - fmt.Println(q.PopFront()) - } -} -``` - -## Uses - -Deque can be used as both a: -- [Queue](https://en.wikipedia.org/wiki/Queue_(abstract_data_type)) using `PushBack` and `PopFront` -- [Stack](https://en.wikipedia.org/wiki/Stack_(abstract_data_type)) using `PushBack` and `PopBack` diff --git a/vendor/github.com/gammazero/deque/deque.go b/vendor/github.com/gammazero/deque/deque.go deleted file mode 100644 index a1743c161..000000000 --- a/vendor/github.com/gammazero/deque/deque.go +++ /dev/null @@ -1,420 +0,0 @@ -package deque - -import "fmt" - -// minCapacity is the smallest capacity that deque may have. 
Must be power of 2 -// for bitwise modulus: x % n == x & (n - 1). -const minCapacity = 16 - -// Deque represents a single instance of the deque data structure. A Deque -// instance contains items of the type specified by the type argument. -type Deque[T any] struct { - buf []T - head int - tail int - count int - minCap int -} - -// New creates a new Deque, optionally setting the current and minimum capacity -// when non-zero values are given for these. The Deque instance returns -// operates on items of the type specified by the type argument. For example, -// to create a Deque that contains strings, -// -// stringDeque := deque.New[string]() -// -// To create a Deque with capacity to store 2048 ints without resizing, and -// that will not resize below space for 32 items when removing items: -// -// d := deque.New[int](2048, 32) -// -// To create a Deque that has not yet allocated memory, but after it does will -// never resize to have space for less than 64 items: -// -// d := deque.New[int](0, 64) -// -// Any size values supplied here are rounded up to the nearest power of 2. -func New[T any](size ...int) *Deque[T] { - var capacity, minimum int - if len(size) >= 1 { - capacity = size[0] - if len(size) >= 2 { - minimum = size[1] - } - } - - minCap := minCapacity - for minCap < minimum { - minCap <<= 1 - } - - var buf []T - if capacity != 0 { - bufSize := minCap - for bufSize < capacity { - bufSize <<= 1 - } - buf = make([]T, bufSize) - } - - return &Deque[T]{ - buf: buf, - minCap: minCap, - } -} - -// Cap returns the current capacity of the Deque. If q is nil, q.Cap() is zero. -func (q *Deque[T]) Cap() int { - if q == nil { - return 0 - } - return len(q.buf) -} - -// Len returns the number of elements currently stored in the queue. If q is -// nil, q.Len() is zero. -func (q *Deque[T]) Len() int { - if q == nil { - return 0 - } - return q.count -} - -// PushBack appends an element to the back of the queue. Implements FIFO when -// elements are removed with PopFront, and LIFO when elements are removed with -// PopBack. -func (q *Deque[T]) PushBack(elem T) { - q.growIfFull() - - q.buf[q.tail] = elem - // Calculate new tail position. - q.tail = q.next(q.tail) - q.count++ -} - -// PushFront prepends an element to the front of the queue. -func (q *Deque[T]) PushFront(elem T) { - q.growIfFull() - - // Calculate new head position. - q.head = q.prev(q.head) - q.buf[q.head] = elem - q.count++ -} - -// PopFront removes and returns the element from the front of the queue. -// Implements FIFO when used with PushBack. If the queue is empty, the call -// panics. -func (q *Deque[T]) PopFront() T { - if q.count <= 0 { - panic("deque: PopFront() called on empty queue") - } - ret := q.buf[q.head] - var zero T - q.buf[q.head] = zero - // Calculate new head position. - q.head = q.next(q.head) - q.count-- - - q.shrinkIfExcess() - return ret -} - -// PopBack removes and returns the element from the back of the queue. -// Implements LIFO when used with PushBack. If the queue is empty, the call -// panics. -func (q *Deque[T]) PopBack() T { - if q.count <= 0 { - panic("deque: PopBack() called on empty queue") - } - - // Calculate new tail position - q.tail = q.prev(q.tail) - - // Remove value at tail. - ret := q.buf[q.tail] - var zero T - q.buf[q.tail] = zero - q.count-- - - q.shrinkIfExcess() - return ret -} - -// Front returns the element at the front of the queue. This is the element -// that would be returned by PopFront. This call panics if the queue is empty. 
-func (q *Deque[T]) Front() T { - if q.count <= 0 { - panic("deque: Front() called when empty") - } - return q.buf[q.head] -} - -// Back returns the element at the back of the queue. This is the element that -// would be returned by PopBack. This call panics if the queue is empty. -func (q *Deque[T]) Back() T { - if q.count <= 0 { - panic("deque: Back() called when empty") - } - return q.buf[q.prev(q.tail)] -} - -// At returns the element at index i in the queue without removing the element -// from the queue. This method accepts only non-negative index values. At(0) -// refers to the first element and is the same as Front(). At(Len()-1) refers -// to the last element and is the same as Back(). If the index is invalid, the -// call panics. -// -// The purpose of At is to allow Deque to serve as a more general purpose -// circular buffer, where items are only added to and removed from the ends of -// the deque, but may be read from any place within the deque. Consider the -// case of a fixed-size circular log buffer: A new entry is pushed onto one end -// and when full the oldest is popped from the other end. All the log entries -// in the buffer must be readable without altering the buffer contents. -func (q *Deque[T]) At(i int) T { - if i < 0 || i >= q.count { - panic(outOfRangeText(i, q.Len())) - } - // bitwise modulus - return q.buf[(q.head+i)&(len(q.buf)-1)] -} - -// Set assigns the item to index i in the queue. Set indexes the deque the same -// as At but perform the opposite operation. If the index is invalid, the call -// panics. -func (q *Deque[T]) Set(i int, item T) { - if i < 0 || i >= q.count { - panic(outOfRangeText(i, q.Len())) - } - // bitwise modulus - q.buf[(q.head+i)&(len(q.buf)-1)] = item -} - -// Clear removes all elements from the queue, but retains the current capacity. -// This is useful when repeatedly reusing the queue at high frequency to avoid -// GC during reuse. The queue will not be resized smaller as long as items are -// only added. Only when items are removed is the queue subject to getting -// resized smaller. -func (q *Deque[T]) Clear() { - var zero T - modBits := len(q.buf) - 1 - h := q.head - for i := 0; i < q.Len(); i++ { - q.buf[(h+i)&modBits] = zero - } - q.head = 0 - q.tail = 0 - q.count = 0 -} - -// Rotate rotates the deque n steps front-to-back. If n is negative, rotates -// back-to-front. Having Deque provide Rotate avoids resizing that could happen -// if implementing rotation using only Pop and Push methods. If q.Len() is one -// or less, or q is nil, then Rotate does nothing. -func (q *Deque[T]) Rotate(n int) { - if q.Len() <= 1 { - return - } - // Rotating a multiple of q.count is same as no rotation. - n %= q.count - if n == 0 { - return - } - - modBits := len(q.buf) - 1 - // If no empty space in buffer, only move head and tail indexes. - if q.head == q.tail { - // Calculate new head and tail using bitwise modulus. - q.head = (q.head + n) & modBits - q.tail = q.head - return - } - - var zero T - - if n < 0 { - // Rotate back to front. - for ; n < 0; n++ { - // Calculate new head and tail using bitwise modulus. - q.head = (q.head - 1) & modBits - q.tail = (q.tail - 1) & modBits - // Put tail value at head and remove value at tail. - q.buf[q.head] = q.buf[q.tail] - q.buf[q.tail] = zero - } - return - } - - // Rotate front to back. - for ; n > 0; n-- { - // Put head value at tail and remove value at head. - q.buf[q.tail] = q.buf[q.head] - q.buf[q.head] = zero - // Calculate new head and tail using bitwise modulus. 
-
-// Rotate rotates the deque n steps front-to-back. If n is negative, rotates
-// back-to-front. Having Deque provide Rotate avoids resizing that could happen
-// if implementing rotation using only Pop and Push methods. If q.Len() is one
-// or less, or q is nil, then Rotate does nothing.
-func (q *Deque[T]) Rotate(n int) {
-    if q.Len() <= 1 {
-        return
-    }
-    // Rotating a multiple of q.count is same as no rotation.
-    n %= q.count
-    if n == 0 {
-        return
-    }
-
-    modBits := len(q.buf) - 1
-    // If no empty space in buffer, only move head and tail indexes.
-    if q.head == q.tail {
-        // Calculate new head and tail using bitwise modulus.
-        q.head = (q.head + n) & modBits
-        q.tail = q.head
-        return
-    }
-
-    var zero T
-
-    if n < 0 {
-        // Rotate back to front.
-        for ; n < 0; n++ {
-            // Calculate new head and tail using bitwise modulus.
-            q.head = (q.head - 1) & modBits
-            q.tail = (q.tail - 1) & modBits
-            // Put tail value at head and remove value at tail.
-            q.buf[q.head] = q.buf[q.tail]
-            q.buf[q.tail] = zero
-        }
-        return
-    }
-
-    // Rotate front to back.
-    for ; n > 0; n-- {
-        // Put head value at tail and remove value at head.
-        q.buf[q.tail] = q.buf[q.head]
-        q.buf[q.head] = zero
-        // Calculate new head and tail using bitwise modulus.
-        q.head = (q.head + 1) & modBits
-        q.tail = (q.tail + 1) & modBits
-    }
-}
-
-// Index returns the index into the Deque of the first item satisfying f(item),
-// or -1 if none do. If q is nil, then -1 is always returned. Search is linear
-// starting with index 0.
-func (q *Deque[T]) Index(f func(T) bool) int {
-    if q.Len() > 0 {
-        modBits := len(q.buf) - 1
-        for i := 0; i < q.count; i++ {
-            if f(q.buf[(q.head+i)&modBits]) {
-                return i
-            }
-        }
-    }
-    return -1
-}
-
-// RIndex is the same as Index, but searches from Back to Front. The index
-// returned is from Front to Back, where index 0 is the index of the item
-// returned by Front().
-func (q *Deque[T]) RIndex(f func(T) bool) int {
-    if q.Len() > 0 {
-        modBits := len(q.buf) - 1
-        for i := q.count - 1; i >= 0; i-- {
-            if f(q.buf[(q.head+i)&modBits]) {
-                return i
-            }
-        }
-    }
-    return -1
-}
-
-// Insert is used to insert an element into the middle of the queue, before the
-// element at the specified index. Insert(0,e) is the same as PushFront(e) and
-// Insert(Len(),e) is the same as PushBack(e). Accepts only non-negative index
-// values, and panics if index is out of range.
-//
-// Important: Deque is optimized for O(1) operations at the ends of the queue,
-// not for operations in the the middle. Complexity of this function is
-// constant plus linear in the lesser of the distances between the index and
-// either of the ends of the queue.
-func (q *Deque[T]) Insert(at int, item T) {
-    if at < 0 || at > q.count {
-        panic(outOfRangeText(at, q.Len()))
-    }
-    if at*2 < q.count {
-        q.PushFront(item)
-        front := q.head
-        for i := 0; i < at; i++ {
-            next := q.next(front)
-            q.buf[front], q.buf[next] = q.buf[next], q.buf[front]
-            front = next
-        }
-        return
-    }
-    swaps := q.count - at
-    q.PushBack(item)
-    back := q.prev(q.tail)
-    for i := 0; i < swaps; i++ {
-        prev := q.prev(back)
-        q.buf[back], q.buf[prev] = q.buf[prev], q.buf[back]
-        back = prev
-    }
-}
-
-// Remove removes and returns an element from the middle of the queue, at the
-// specified index. Remove(0) is the same as PopFront() and Remove(Len()-1) is
-// the same as PopBack(). Accepts only non-negative index values, and panics if
-// index is out of range.
-//
-// Important: Deque is optimized for O(1) operations at the ends of the queue,
-// not for operations in the the middle. Complexity of this function is
-// constant plus linear in the lesser of the distances between the index and
-// either of the ends of the queue.
-func (q *Deque[T]) Remove(at int) T {
-    if at < 0 || at >= q.Len() {
-        panic(outOfRangeText(at, q.Len()))
-    }
-
-    rm := (q.head + at) & (len(q.buf) - 1)
-    if at*2 < q.count {
-        for i := 0; i < at; i++ {
-            prev := q.prev(rm)
-            q.buf[prev], q.buf[rm] = q.buf[rm], q.buf[prev]
-            rm = prev
-        }
-        return q.PopFront()
-    }
-    swaps := q.count - at - 1
-    for i := 0; i < swaps; i++ {
-        next := q.next(rm)
-        q.buf[rm], q.buf[next] = q.buf[next], q.buf[rm]
-        rm = next
-    }
-    return q.PopBack()
-}
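
Rotate, above, lets a caller cycle elements without the shrink/grow churn that popping and re-pushing could trigger. One plausible use is a simple round-robin picker; a sketch under that assumption (names are illustrative):

    // assumes: import "github.com/gammazero/deque"

    // nextWorker returns the current front worker and rotates it to the back,
    // so repeated calls cycle through every worker without resizing the
    // underlying buffer. The caller must ensure workers.Len() > 0, since
    // Front panics on an empty deque.
    func nextWorker(workers *deque.Deque[string]) string {
        w := workers.Front()
        workers.Rotate(1) // one step front-to-back; Rotate(-1) would undo it
        return w
    }
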
-
-// SetMinCapacity sets a minimum capacity of 2^minCapacityExp. If the value of
-// the minimum capacity is less than or equal to the minimum allowed, then
-// capacity is set to the minimum allowed. This may be called at anytime to set
-// a new minimum capacity.
-//
-// Setting a larger minimum capacity may be used to prevent resizing when the
-// number of stored items changes frequently across a wide range.
-func (q *Deque[T]) SetMinCapacity(minCapacityExp uint) {
-    if 1<<minCapacityExp > minCapacity {
-        q.minCap = 1 << minCapacityExp
-    } else {
-        q.minCap = minCapacity
-    }
-}
-
-// prev returns the previous buffer position wrapping around buffer.
-func (q *Deque[T]) prev(i int) int {
-    return (i - 1) & (len(q.buf) - 1) // bitwise modulus
-}
-
-// next returns the next buffer position wrapping around buffer.
-func (q *Deque[T]) next(i int) int {
-    return (i + 1) & (len(q.buf) - 1) // bitwise modulus
-}
-
-// growIfFull resizes up if the buffer is full.
-func (q *Deque[T]) growIfFull() {
-    if q.count != len(q.buf) {
-        return
-    }
-    if len(q.buf) == 0 {
-        if q.minCap == 0 {
-            q.minCap = minCapacity
-        }
-        q.buf = make([]T, q.minCap)
-        return
-    }
-    q.resize()
-}
-
-// shrinkIfExcess resize down if the buffer 1/4 full.
-func (q *Deque[T]) shrinkIfExcess() {
-    if len(q.buf) > q.minCap && (q.count<<2) == len(q.buf) {
-        q.resize()
-    }
-}
-
-// resize resizes the deque to fit exactly twice its current contents. This is
-// used to grow the queue when it is full, and also to shrink it when it is
-// only a quarter full.
-func (q *Deque[T]) resize() {
-    newBuf := make([]T, q.count<<1)
-    if q.tail > q.head {
-        copy(newBuf, q.buf[q.head:q.tail])
-    } else {
-        n := copy(newBuf, q.buf[q.head:])
-        copy(newBuf[n:], q.buf[:q.tail])
-    }
-
-    q.head = 0
-    q.tail = q.count
-    q.buf = newBuf
-}
-
-func outOfRangeText(i, len int) string {
-    return fmt.Sprintf("deque: index out of range %d with length %d", i, len)
-}
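
The sizing helpers above complete the ring-buffer policy: growIfFull doubles the buffer when it is full, shrinkIfExcess halves it when only a quarter of the slots are occupied, and neither ever goes below minCap. SetMinCapacity raises that floor to 1 << minCapacityExp, the knob for callers whose length swings over a wide range. A small sketch under that assumption:

    // assumes: import "github.com/gammazero/deque"

    func demoMinCapacity() {
        d := deque.New[int]()
        d.SetMinCapacity(10) // floor the capacity at 1 << 10 = 1024 slots
        for i := 0; i < 5000; i++ {
            d.PushBack(i) // grows by doubling: 1024, 2048, 4096, 8192
        }
        for d.Len() > 0 {
            d.PopFront() // shrinks at 1/4 occupancy, never below the 1024 floor
        }
    }
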
diff --git a/vendor/github.com/gammazero/deque/doc.go b/vendor/github.com/gammazero/deque/doc.go
deleted file mode 100644
index 6cfead994..000000000
--- a/vendor/github.com/gammazero/deque/doc.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Package deque provides a fast ring-buffer deque (double-ended queue)
-implementation.
-
-Deque generalizes a queue and a stack, to efficiently add and remove items at
-either end with O(1) performance. Queue (FIFO) operations are supported using
-PushBack and PopFront. Stack (LIFO) operations are supported using PushBack and
-PopBack.
-
-# Ring-buffer Performance
-
-The ring-buffer automatically resizes by powers of two, growing when additional
-capacity is needed and shrinking when only a quarter of the capacity is used,
-and uses bitwise arithmetic for all calculations.
-
-The ring-buffer implementation significantly improves memory and time
-performance with fewer GC pauses, compared to implementations based on slices
-and linked lists.
-
-For maximum speed, this deque implementation leaves concurrency safety up to
-the application to provide, however the application chooses, if needed at all.
-
-# Reading Empty Deque
-
-Since it is OK for the deque to contain the zero-value of an item, it is
-necessary to either panic or return a second boolean value to indicate the
-deque is empty, when reading or removing an element. This deque panics when
-reading from an empty deque. This is a run-time check to help catch programming
-errors, which may be missed if a second return value is ignored. Simply check
-Deque.Len() before reading from the deque.
-
-# Generics
-
-Deque uses generics to create a Deque that contains items of the type
-specified. To create a Deque that holds a specific type, provide a type
-argument to New or with the variable declaration.
-*/
-package deque
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0fa318bfd..16c614b08 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -110,9 +110,6 @@ github.com/flynn/noise
 # github.com/fxamacker/cbor/v2 v2.5.0
 ## explicit; go 1.12
 github.com/fxamacker/cbor/v2
-# github.com/gammazero/deque v0.2.1
-## explicit; go 1.18
-github.com/gammazero/deque
 # github.com/gaukas/godicttls v0.0.4
 ## explicit; go 1.19
 github.com/gaukas/godicttls
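
As the removed doc.go notes, reads and removals on an empty deque panic rather than returning an ok flag, so callers were expected to guard with Len first. A non-panicking wrapper in that spirit might look like this sketch (not code from this repository):

    // assumes: import "github.com/gammazero/deque"

    // tryPopFront is a non-panicking wrapper around Deque.PopFront.
    func tryPopFront[T any](d *deque.Deque[T]) (T, bool) {
        var zero T
        if d.Len() == 0 {
            return zero, false // PopFront would panic here
        }
        return d.PopFront(), true
    }
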