Skip to content
This repository has been archived by the owner on Jan 11, 2024. It is now read-only.

Commit

Permalink
Merge branch 'master' of https://github.com/koli/kong-ingress
Browse files Browse the repository at this point in the history
  • Loading branch information
sandromello committed Oct 13, 2017
2 parents 7373280 + a85518d commit bd66978
Show file tree
Hide file tree
Showing 16,052 changed files with 1,528,607 additions and 3,287,935 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
30 changes: 30 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# v0.3.0-alpha

**Image:** `quay.io/koli/kong-ingress:v0.3.0-alpha`

## BREAKING CHANGE

Third Party Resources are deprecated in v1.7.0 and removed in v1.8.0, this release deprecates TPR and includes Custom Resource Definition, thus it's important to migrate all the domain resources. See this [article](https://kubernetes.io/docs/tasks/access-kubernetes-api/migrate-third-party-resource/) to understand how this migration works.

## Changes

- Update kubernetes library to the latest release

# v0.2.1-alpha

**Image:** `quay.io/koli/kong-ingress:v0.2.1-alpha`

## Added

- Add support for custom ports on ingress [#17](https://github.com/kolihub/kong-ingress/issues/17)

# v0.2.0-alpha

**Image:** `quay.io/koli/kong-ingress:v0.2.0-alpha`

## Features

- Expose applications through domain names
- Host collision prevention
- Delegate subdomains throughout a kubernetes cluster
- Path based routing
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,17 @@ Kong it's an API Gateway that deals with L7 traffic, the ingress uses the kong a

### Domain Claims

Some of the main problems of using name based virtual hosting with ingress is that you can't know who's is the owner of a specific host, thus a Kong api could be updated by multiple ingress resources resulting in a unwanted behaviour.
Some of the main problems of using name based virtual hosting with ingress is that you can't know who's the owner of a specific host, thus a Kong api could be updated by multiple ingress resources resulting in an unwanted behaviour.

A third party resource is used to allow the kong ingress to lease domains for each host specified in ingress resources. If a domain is already claimed in the cluster, the controller rejects the creation of apis on Kong.
A [Custom Resource Definition](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) is used to allow the kong ingress to lease domains for each host specified in ingress resources. If a domain is already claimed in the cluster, the controller rejects the creation of apis on Kong.

[More info](./docs/domain-claims.md)

> More info about the issue: https://github.com/kubernetes/kubernetes/issues/30151
## Prerequisites

- Kubernetes cluster v1.6.0+
- Kubernetes cluster v1.7.0+
- Kubernetes DNS add-on
- Kong server v0.10.0+

Expand Down
39 changes: 20 additions & 19 deletions cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,16 +16,17 @@ import (
"github.com/golang/glog"
"github.com/spf13/pflag"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"

apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubescheme "k8s.io/client-go/kubernetes/scheme"
)

// TODO: test with wipeondelete (on/off)
Expand Down Expand Up @@ -59,7 +60,7 @@ func init() {
pflag.StringVar(&cfg.TLSConfig.KeyFile, "key-file", "", "path to private TLS certificate file.")
pflag.StringVar(&cfg.TLSConfig.CAFile, "ca-file", "", "path to TLS CA file.")
pflag.StringVar(&cfg.KongAdminHost, "kong-server", "", "kong admin api service, e.g. 'http://127.0.0.1:8001'")
pflag.BoolVar(&cfg.AutoClaim, "auto-claim", false, "try to claim the hosts on creation")
pflag.BoolVar(&cfg.AutoClaim, "auto-claim", false, "try to claim hosts on new ingresses")
pflag.Int64Var(&cfg.ResyncOnFailed, "resync-on-fail", 60, "time to resync a domain in a failed state phase in seconds")
pflag.BoolVar(&cfg.WipeOnDelete, "wipe-on-delete", false, "wipe all orphan kong apis when deleting a domain resource")
pflag.StringVar(&cfg.ClusterDNS, "cluster-dns", "svc.cluster.local", "kubernetes cluster dns name, used to configure the upstream apis in Kong")
Expand All @@ -68,6 +69,9 @@ func init() {
pflag.BoolVar(&showVersion, "version", false, "print version information and quit")
pflag.BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "don't verify API server's CA certificate.")
pflag.Parse()
// Convinces goflags that we have called Parse() to avoid noisy logs.
// OSS Issue: kubernetes/kubernetes#17162.
flag.CommandLine.Parse([]string{})
}

func main() {
Expand Down Expand Up @@ -95,23 +99,21 @@ func main() {
TLSClientConfig: cfg.TLSConfig,
}
}
var tprConfig *rest.Config
tprConfig = config
tprConfig.APIPath = "/apis"
tprConfig.GroupVersion = &kong.SchemeGroupVersion
tprConfig.ContentType = runtime.ContentTypeJSON
tprConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}
metav1.AddToGroupVersion(api.Scheme, kong.SchemeGroupVersion)
kong.SchemeBuilder.AddToScheme(api.Scheme)

tprClient, err := rest.RESTClientFor(tprConfig)
if err != nil {
glog.Fatalf("failed retrieving tprclient from config: %v", err)
var extConfig *rest.Config
extConfig = config
extConfig.APIPath = "/apis"
extConfig.GroupVersion = &kong.SchemeGroupVersion
extConfig.ContentType = runtime.ContentTypeJSON
extConfig.NegotiatedSerializer = serializer.DirectCodecFactory{
CodecFactory: kubescheme.Codecs,
}
kubeClient, err := kubernetes.NewForConfig(config)
kong.SchemeBuilder.AddToScheme(kubescheme.Scheme)
extClient, err := rest.RESTClientFor(extConfig)
if err != nil {
glog.Fatalf("failed retrieving client from config: %v", err)
glog.Fatalf("failed retrieving extensions client: %v", err)
}
kubeClient := kubernetes.NewForConfigOrDie(config)

kongcli, err := kong.NewKongRESTClient(&rest.Config{Host: cfg.KongAdminHost, Timeout: time.Second * 2})
if err != nil {
Expand All @@ -134,14 +136,13 @@ func main() {
} else {
cfg.PodNamespace = os.Getenv("POD_NAMESPACE")
}

if err := controller.CreateDomainTPRs(cfg.Host, kubeClient); err != nil {
if err := controller.CreateCRD(apiextensionsclient.NewForConfigOrDie(config)); err != nil {
glog.Fatalf("failed creating domains TPR: %s", err)
}

go controller.NewKongController(
kubeClient,
tprClient,
extClient,
kongcli,
&cfg,
time.Second*120,
Expand Down
1 change: 0 additions & 1 deletion docs/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -173,4 +173,3 @@ spec:
serviceName: web
servicePort: 80
```
33 changes: 18 additions & 15 deletions docs/domain-claims.md
Original file line number Diff line number Diff line change
@@ -1,15 +1,21 @@
# Domain Claims

Domain Claims are needed to control who owns a specific host in the cluster, the third party resource below is used to represent the information about claims:
Domain Claims are needed to control who owns a specific host in the cluster, the custom resource definition below is used to represent the information about claims:

```yaml
apiVersion: extensions/v1beta1
kind: ThirdPartyResource
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: domain.platform.koli.io
description: Holds information about domain claims to prevent duplicated hosts in ingress resources
versions:
- name: v1
name: domains.platform.koli.io
spec:
group: platform.koli.io
names:
kind: Domain
listKind: DomainList
plural: domains
singular: domain
scope: Namespaced
version: v1
```
A domain claim is represented with the following specification:
Expand Down Expand Up @@ -53,7 +59,7 @@ A `shared` is a subdomain and means the domain is inherit from a `primary` type.

1) Search in the `parent` attribute (must be a valid namespace)
2) Search in the `shared` domain resource namespace
3) Search in the [system namespace]()
3) Search in the system namespace (the namespace in which the ingress controller is running)

If a `primary` domain couldn't be found, the resource is set to a failing state and it will be retried until a `primary` is found.

Expand All @@ -75,7 +81,7 @@ spec:

## Parent

A `parent` attribute it's only useful when the resource is a `shared` type. It indicates the namespace to search for the `primary` domain, if it fail, fallbacks searching in the namespace of the resource and in [system namespace]()
A `parent` attribute is only useful when the resource is a `shared` type. It indicates the namespace to search for the `primary` domain; if it fails, it falls back to searching in the namespace of the resource and in the system namespace

> The `parent` namespace must explicitly allow delegation using the attribute `delegates`

Expand Down Expand Up @@ -141,7 +147,7 @@ spec:
parent: acme-org
status:
phase: Failed
message: The primary domain wasn't found"
message: Primary domain not found
reason: DomainNotFound
lastUpdateTime: 2017-04-04T12:25:42Z
```
Expand All @@ -150,9 +156,6 @@ status:

The resource is prepared to be provisioned, in this state the kubernetes finalizer `kolihub.io/kong` is set and the status is changed to `Pending`. The status is represented by an empty string: `""`

> **Note:** The finalizer doesn't do anything at this time, because the implementation is broken in Kubernetes: https://github.com/kubernetes/kubernetes/issues/40715.
> In the future it will be used to clean the associated resources more efficiently (already implemented).

## Pending

The `Pending` state means the controller is searching for duplicates or inconsistencies.
Expand All @@ -167,12 +170,12 @@ If the domain doesn't contain any inconsistencies or duplicates, the state of th

## OK

The domain is ready to be used in a ingress resource.
The domain is ready to be used in an ingress resource.

## Failed

This state means the claim has failed, the details are described in `reason` and `message` attributes.

> **Note about status:** The status spec from a domain resource is used to control the state of a domain, the controller will act accordingly to this information.
> The `status` attributes aren't immutable, thus an user could change it causing an undesirable behaviour for the resource.
> The `status` attribute isn't immutable, thus a user could change it, causing undesirable behaviour for the resource.
> [Related issue.](https://github.com/kubernetes/kubernetes/issues/38113)
41 changes: 24 additions & 17 deletions pkg/controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,15 @@ import (
"github.com/golang/glog"
"kolihub.io/kong-ingress/pkg/kong"

"k8s.io/api/core/v1"
v1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/rest"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"

Expand All @@ -36,7 +37,7 @@ var (
// KongController watches the kubernetes api server and adds/removes apis on Kong
type KongController struct {
client kubernetes.Interface
tprClient rest.Interface
extClient restclient.Interface
kongcli *kong.CoreClient

infIng cache.SharedIndexInformer
Expand All @@ -52,25 +53,31 @@ type KongController struct {
}

// NewKongController creates a new KongController
func NewKongController(client kubernetes.Interface, tprClient rest.Interface, kongcli *kong.CoreClient, cfg *Config, resyncPeriod time.Duration) *KongController {
func NewKongController(
client kubernetes.Interface,
extClient restclient.Interface,
kongcli *kong.CoreClient,
cfg *Config,
resyncPeriod time.Duration,
) *KongController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
Interface: v1core.New(client.Core().RESTClient()).Events(""),
})
kc := &KongController{
client: client,
tprClient: tprClient,
extClient: extClient,
kongcli: kongcli,
recorder: eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: "kong-controller"}),
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kong-controller"}),
cfg: cfg,
}
kc.ingQueue = NewTaskQueue(kc.syncIngress, "kong_ingress_queue")
kc.domQueue = NewTaskQueue(kc.syncDomain, "kong_domain_queue")
kc.svcQueue = NewTaskQueue(kc.syncServices, "kong_service_queue")

kc.infIng = cache.NewSharedIndexInformer(
cache.NewListWatchFromClient(client.Extensions().RESTClient(), "ingresses", api.NamespaceAll, fields.Everything()),
cache.NewListWatchFromClient(client.Extensions().RESTClient(), "ingresses", metav1.NamespaceAll, fields.Everything()),
&v1beta1.Ingress{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
Expand Down Expand Up @@ -103,7 +110,7 @@ func NewKongController(client kubernetes.Interface, tprClient rest.Interface, ko
})

kc.infSvc = cache.NewSharedIndexInformer(
cache.NewListWatchFromClient(client.Core().RESTClient(), "services", api.NamespaceAll, fields.Everything()),
cache.NewListWatchFromClient(client.Core().RESTClient(), "services", metav1.NamespaceAll, fields.Everything()),
&v1.Service{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
Expand All @@ -126,7 +133,7 @@ func NewKongController(client kubernetes.Interface, tprClient rest.Interface, ko
})

kc.infDom = cache.NewSharedIndexInformer(
cache.NewListWatchFromClient(tprClient, "domains", api.NamespaceAll, fields.Everything()),
cache.NewListWatchFromClient(extClient, "domains", metav1.NamespaceAll, fields.Everything()),
&kong.Domain{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
Expand Down Expand Up @@ -248,7 +255,7 @@ func (k *KongController) syncIngress(key string, numRequeues int) error {
}
if !isAllowed {
if numRequeues > 2 {
k.recorder.Eventf(ing, v1.EventTypeWarning, "DomainNotFound", "The domain '%s' wasn't claimed, check its state", notFoundHost)
k.recorder.Eventf(ing, v1.EventTypeWarning, "DomainNotFound", "The domain '%s' was not claimed, check its state", notFoundHost)
}
return fmt.Errorf("failed claiming domain %s, check its state!", notFoundHost)
}
Expand Down Expand Up @@ -361,16 +368,16 @@ func (k *KongController) claimDomains(ing *v1beta1.Ingress) error {
continue
}
glog.Infof("%s/%s - Updating %s domain %s ...", ing.Namespace, ing.Name, domainType, d.GetDomain())
domCopy, err := dom.DeepCopy()
if err != nil {
return fmt.Errorf("failed deep copying resource [%s]", err)
domCopy := dom.DeepCopy()
if domCopy == nil {
return fmt.Errorf("failed deep copying resource [%v]", dom)
}
domCopy.Spec = d.Spec
// If the domain exists, try to recover its status requeuing as a new domain
if domCopy.Status.Phase != kong.DomainStatusOK {
domCopy.Status = kong.DomainStatus{Phase: kong.DomainStatusNew}
}
res, err := k.tprClient.Put().
res, err := k.extClient.Put().
Resource("domains").
Name(domCopy.Name).
Namespace(ing.Namespace).
Expand All @@ -381,7 +388,7 @@ func (k *KongController) claimDomains(ing *v1beta1.Ingress) error {
}

} else {
res, err := k.tprClient.Post().
res, err := k.extClient.Post().
Resource("domains").
Namespace(ing.Namespace).
Body(d).
Expand Down
Loading

0 comments on commit bd66978

Please sign in to comment.