Merge branch 'master' into KNOX-998-Package_Restructuring
author    Sandeep More <more@apache.org>    Mon, 13 Nov 2017 14:44:22 +0000 (09:44 -0500)
committer Sandeep More <more@apache.org>    Mon, 13 Nov 2017 14:44:22 +0000 (09:44 -0500)
# Conflicts:
# gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
# gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java

27 files changed:
gateway-demo-ldap-launcher/pom.xml
gateway-demo-ldap/pom.xml
gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
gateway-release/pom.xml
gateway-server-launcher/pom.xml
gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
gateway-server/src/main/java/org/apache/knox/gateway/topology/builder/BeanPropertyTopologyBuilder.java
gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptor.java
gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandler.java
gateway-server/src/main/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorImpl.java
gateway-server/src/main/java/org/apache/knox/gateway/topology/xml/KnoxFormatXmlTopologyRules.java
gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorFactoryTest.java
gateway-server/src/test/java/org/apache/knox/gateway/topology/simple/SimpleDescriptorHandlerTest.java
gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/TopologiesResource.java
gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/BeanConverter.java
gateway-service-admin/src/main/java/org/apache/knox/gateway/service/admin/beans/Topology.java
gateway-service-definitions/src/main/java/org/apache/knox/gateway/service/definition/CustomDispatch.java
gateway-shell-launcher/pom.xml
gateway-shell-release/pom.xml
gateway-spi/src/main/java/org/apache/knox/gateway/dispatch/DefaultHttpClientFactory.java
gateway-spi/src/main/java/org/apache/knox/gateway/i18n/GatewaySpiMessages.java
gateway-spi/src/main/java/org/apache/knox/gateway/topology/Topology.java
gateway-test/src/test/java/org/apache/knox/gateway/deploy/DeploymentFactoryFuncTest.java
knox-cli-launcher/pom.xml
pom.xml

gateway-demo-ldap-launcher/pom.xml: Simple merge
gateway-demo-ldap/pom.xml: Simple merge
diff --cc gateway-provider-ha/src/test/java/org/apache/knox/gateway/ha/provider/impl/DefaultURLManagerTest.java
index c8b6c58,0000000..a2cfa54
mode 100644,000000..100644
--- /dev/null
@@@ -1,73 -1,0 +1,92 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.ha.provider.impl;
 +
 +import org.junit.Test;
 +
 +import java.util.ArrayList;
 +
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertTrue;
 +
 +public class DefaultURLManagerTest {
 +
 +   @Test
 +   public void testActiveURLManagement() {
 +      ArrayList<String> urls = new ArrayList<>();
 +      String url1 = "http://host1";
 +      urls.add(url1);
 +      String url2 = "http://host2";
 +      urls.add(url2);
 +      DefaultURLManager manager = new DefaultURLManager();
 +      manager.setURLs(urls);
 +      assertTrue(manager.getURLs().containsAll(urls));
 +      assertEquals(url1, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url2);
 +      assertEquals(url1, manager.getActiveURL());
 +   }
 +
++   /**
++    * KNOX-1104
++    * Verify that a service with HaProvider configuration, but only a single URL does not break the HaProvider.
++    */
++   @Test
++   public void testSingleURLManagement() {
++      ArrayList<String> urls = new ArrayList<>();
++      String url1 = "http://host1";
++      urls.add(url1);
++      DefaultURLManager manager = new DefaultURLManager();
++      manager.setURLs(urls);
++      assertTrue(manager.getURLs().containsAll(urls));
++      assertEquals(url1, manager.getActiveURL());
++      manager.markFailed(url1);
++      assertEquals(url1, manager.getActiveURL());
++      manager.markFailed(url1);
++      assertEquals(url1, manager.getActiveURL());
++   }
++
 +   @Test
 +   public void testMarkingFailedURL() {
 +      ArrayList<String> urls = new ArrayList<>();
 +      String url1 = "http://host1:4555";
 +      urls.add(url1);
 +      String url2 = "http://host2:1234";
 +      urls.add(url2);
 +      String url3 = "http://host1:1234";
 +      urls.add(url3);
 +      String url4 = "http://host2:4555";
 +      urls.add(url4);
 +      DefaultURLManager manager = new DefaultURLManager();
 +      manager.setURLs(urls);
 +      assertTrue(manager.getURLs().containsAll(urls));
 +      assertEquals(url1, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url1);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url3);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url4);
 +      assertEquals(url2, manager.getActiveURL());
 +      manager.markFailed(url2);
 +      assertEquals(url3, manager.getActiveURL());
 +   }
 +
 +}
gateway-release/pom.xml: Simple merge
gateway-server-launcher/pom.xml: Simple merge
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/deploy/DeploymentFactory.java
index bb8f1f2,0000000..b3eabb2
mode 100644,000000..100644
--- /dev/null
@@@ -1,772 -1,0 +1,795 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy;
 +
 +import java.beans.Statement;
 +import java.io.File;
 +import java.io.IOException;
 +import java.io.StringWriter;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Iterator;
 +import java.util.LinkedHashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Map.Entry;
 +import java.util.ServiceLoader;
 +import java.util.Set;
 +import java.util.TreeMap;
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.JAXBException;
 +import javax.xml.bind.Marshaller;
 +
 +import org.apache.knox.gateway.GatewayMessages;
 +import org.apache.knox.gateway.GatewayServlet;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.deploy.impl.ApplicationDeploymentContributor;
 +import org.apache.knox.gateway.descriptor.GatewayDescriptor;
 +import org.apache.knox.gateway.descriptor.GatewayDescriptorFactory;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.registry.ServiceRegistry;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.util.ServiceDefinitionsLoader;
 +import org.apache.knox.gateway.util.Urls;
 +import org.jboss.shrinkwrap.api.ShrinkWrap;
 +import org.jboss.shrinkwrap.api.asset.Asset;
 +import org.jboss.shrinkwrap.api.asset.StringAsset;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 +import org.jboss.shrinkwrap.api.spec.WebArchive;
 +import org.jboss.shrinkwrap.descriptor.api.Descriptors;
 +import org.jboss.shrinkwrap.descriptor.api.webapp30.WebAppDescriptor;
 +import org.jboss.shrinkwrap.descriptor.api.webcommon30.FilterType;
 +import org.jboss.shrinkwrap.descriptor.api.webcommon30.ServletType;
 +
 +public abstract class DeploymentFactory {
 +
 +  private static final String SERVLET_NAME_SUFFIX = "-knox-gateway-servlet";
 +  private static final String FILTER_NAME_SUFFIX = "-knox-gateway-filter";
 +  private static final GatewayMessages log = MessagesFactory.get( GatewayMessages.class );
 +  private static GatewayServices gatewayServices = null;
 +
 +  private static Map<String,Map<String,Map<Version, ServiceDeploymentContributor>>> SERVICE_CONTRIBUTOR_MAP;
 +  static {
 +    loadServiceContributors();
 +  }
 +
 +  private static Set<ProviderDeploymentContributor> PROVIDER_CONTRIBUTORS;
 +  private static Map<String,Map<String,ProviderDeploymentContributor>> PROVIDER_CONTRIBUTOR_MAP;
 +  static {
 +    loadProviderContributors();
 +  }
 +
 +  public static void setGatewayServices(GatewayServices services) {
 +    DeploymentFactory.gatewayServices = services;
 +  }
 +
 +  static List<Application> findApplicationsByUrl( Topology topology, String url ) {
 +    List<Application> foundApps = new ArrayList<Application>();
 +    if( topology != null ) {
 +      url = Urls.trimLeadingAndTrailingSlash( url );
 +      Collection<Application> searchApps = topology.getApplications();
 +      if( searchApps != null ) {
 +        for( Application searchApp : searchApps ) {
 +          List<String> searchUrls = searchApp.getUrls();
 +          if( searchUrls == null || searchUrls.isEmpty() ) {
 +            searchUrls = new ArrayList<String>(1);
 +            searchUrls.add( searchApp.getName() );
 +          }
 +          for( String searchUrl : searchUrls ) {
 +            if( url.equalsIgnoreCase( Urls.trimLeadingAndTrailingSlash( searchUrl ) ) ) {
 +              foundApps.add( searchApp );
 +              break;
 +            }
 +          }
 +        }
 +      }
 +    }
 +    return foundApps;
 +  }
 +
 +  // Verify that there are no two apps with duplicate urls.
 +  static void validateNoAppsWithDuplicateUrlsInTopology( Topology topology ) {
 +    if( topology != null ) {
 +      Collection<Application> apps = topology.getApplications();
 +      if( apps != null ) {
 +        for( Application app : apps ) {
 +          List<String> urls = app.getUrls();
 +          if( urls == null || urls.isEmpty() ) {
 +            urls = new ArrayList<String>(1);
 +            urls.add( app.getName() );
 +          }
 +          for( String url : urls ) {
 +            List<Application> dups = findApplicationsByUrl( topology, url );
 +            if( dups != null ) {
 +              for( Application dup : dups ) {
 +                if( dup != app ) {
 +                  throw new DeploymentException( "Topology " + topology.getName() + " contains applications " + app.getName() + " and " + dup.getName() + " with the same url: " + url );
 +                }
 +              }
 +            }
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  // Verify that if there are services that there are no applications with a root url.
 +  static void validateNoAppsWithRootUrlsInServicesTopology( Topology topology ) {
 +    if( topology != null ) {
 +      if( topology.getServices() != null && !topology.getServices().isEmpty() ) {
 +        List<Application> dups = findApplicationsByUrl( topology, "/" );
 +        if( dups != null && !dups.isEmpty() ) {
 +          throw new DeploymentException( "Topology " + topology.getName() + " contains both services and an application " + dups.get( 0 ).getName() + " with a root url." );
 +        }
 +      }
 +    }
 +  }
 +
 +  static void validateTopology( Topology topology ) {
 +    validateNoAppsWithRootUrlsInServicesTopology( topology );
 +    validateNoAppsWithDuplicateUrlsInTopology( topology );
 +  }
 +
 +  public static EnterpriseArchive createDeployment( GatewayConfig config, Topology topology ) {
 +    validateTopology( topology );
 +    loadStacksServiceContributors( config );
 +    Map<String,List<ProviderDeploymentContributor>> providers = selectContextProviders( topology );
 +    Map<String,List<ServiceDeploymentContributor>> services = selectContextServices( topology );
 +    Map<String,ServiceDeploymentContributor> applications = selectContextApplications( config, topology );
 +    EnterpriseArchive ear = ShrinkWrap.create( EnterpriseArchive.class, topology.getName() );
 +    ear.addAsResource( toStringAsset( topology ), "topology.xml" );
 +    if( !services.isEmpty() ) {
 +      WebArchive war = createServicesDeployment( config, topology, providers, services );
 +      ear.addAsModule( war );
 +    }
 +    if( !applications.isEmpty() ) {
 +      for( Map.Entry<String, ServiceDeploymentContributor> application : applications.entrySet() ) {
 +        WebArchive war = createApplicationDeployment( config, topology, providers, application );
 +        ear.addAsModule( war );
 +      }
 +    }
 +    return ear;
 +  }
 +
 +  private static WebArchive createServicesDeployment(
 +      GatewayConfig config,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services ) {
 +    DeploymentContext context = createDeploymentContext( config, "/", topology, providers );
 +    initialize( context, providers, services, null );
 +    contribute( context, providers, services, null );
 +    finalize( context, providers, services, null );
 +    return context.getWebArchive();
 +  }
 +
 +  public static WebArchive createApplicationDeployment(
 +      GatewayConfig config,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map.Entry<String,ServiceDeploymentContributor> application ) {
 +    String appPath = "/" + Urls.trimLeadingAndTrailingSlash( application.getKey() );
 +    DeploymentContext context = createDeploymentContext( config, appPath, topology, providers );
 +    initialize( context, providers, null, application );
 +    contribute( context, providers, null, application );
 +    finalize( context, providers, null, application );
 +    return context.getWebArchive();
 +  }
 +
 +  private static Asset toStringAsset( Topology topology ) {
 +    StringWriter writer = new StringWriter();
 +    String xml;
 +    try {
 +      Map<String,Object> properties = new HashMap<>(2);
 +      properties.put( "eclipselink-oxm-xml",
 +          "org/apache/knox/gateway/topology/topology_binding-xml.xml");
 +      properties.put( "eclipselink.media-type", "application/xml" );
 +      JAXBContext jaxbContext = JAXBContext.newInstance( Topology.class.getPackage().getName(), Topology.class.getClassLoader() , properties );
 +      Marshaller marshaller = jaxbContext.createMarshaller();
 +      marshaller.setProperty( Marshaller.JAXB_FORMATTED_OUTPUT, true );
 +      marshaller.marshal( topology, writer );
 +      writer.close();
 +      xml = writer.toString();
 +    } catch (IOException e) {
 +      throw new DeploymentException( "Failed to marshall topology.", e );
 +    } catch (JAXBException e) {
 +      throw new DeploymentException( "Failed to marshall topology.", e );
 +    }
 +    StringAsset asset = new StringAsset( xml );
 +    return asset;
 +  }
 +
 +  private static DeploymentContext createDeploymentContext(
 +      GatewayConfig config,
 +      String archivePath,
 +      Topology topology,
 +      Map<String,List<ProviderDeploymentContributor>> providers ) {
 +    archivePath = Urls.encode( archivePath );
 +    WebArchive webArchive = ShrinkWrap.create( WebArchive.class, archivePath );
 +    WebAppDescriptor webAppDesc = Descriptors.create( WebAppDescriptor.class );
 +    GatewayDescriptor gateway = GatewayDescriptorFactory.create();
 +    DeploymentContext context = new DeploymentContextImpl(
 +        config, topology, gateway, webArchive, webAppDesc, providers );
 +    return context;
 +  }
 +
 +  // Scan through the providers in the topology.  Collect any named providers in their roles list.
 +  // Scan through all of the loaded providers.  For each that doesn't have an existing provider in the role
 +  // list add it.
 +  private static Map<String,List<ProviderDeploymentContributor>> selectContextProviders( Topology topology ) {
 +    Map<String,List<ProviderDeploymentContributor>> providers = new LinkedHashMap<String, List<ProviderDeploymentContributor>>();
++    addMissingDefaultProviders(topology);
 +    collectTopologyProviders( topology, providers );
 +    collectDefaultProviders( providers );
 +    return providers;
 +  }
 +
++  private static void addMissingDefaultProviders(Topology topology) {
++    Collection<Provider> providers = topology.getProviders();
++    HashMap<String, String> providerMap = new HashMap<>();
++    for (Provider provider : providers) {
++      providerMap.put(provider.getRole(), provider.getName());
++    }
++    // first make sure that the required provider is available from the serviceloaders
++    // for some tests the number of providers are limited to the classpath of the module
++    // and exceptions will be thrown as topologies are deployed even though they will
++    // work fine at actual server runtime.
++    if (PROVIDER_CONTRIBUTOR_MAP.get("identity-assertion") != null) {
++      // check for required providers and add the defaults if missing
++      if (!providerMap.containsKey("identity-assertion")) {
++        Provider idassertion = new Provider();
++        idassertion.setRole("identity-assertion");
++        idassertion.setName("Default");
++        idassertion.setEnabled(true);
++        providers.add(idassertion);
++      }
++    }
++  }
++
 +  private static void collectTopologyProviders(
 +      Topology topology, Map<String, List<ProviderDeploymentContributor>> defaults ) {
 +    for( Provider provider : topology.getProviders() ) {
 +      String name = provider.getName();
 +      if( name != null ) {
 +        String role = provider.getRole();
 +        Map<String,ProviderDeploymentContributor> nameMap = PROVIDER_CONTRIBUTOR_MAP.get( role );
 +        if( nameMap != null ) {
 +          ProviderDeploymentContributor contributor = nameMap.get( name );
 +          // If there isn't a contributor with this role/name try to find a "*" contributor.
 +          if( contributor == null ) {
 +            nameMap = PROVIDER_CONTRIBUTOR_MAP.get( "*" );
 +            if( nameMap != null ) {
 +              contributor = nameMap.get( name );
 +            }
 +          }
 +          if( contributor != null ) {
 +            List list = defaults.get( role );
 +            if( list == null ) {
 +              list = new ArrayList( 1 );
 +              defaults.put( role, list );
 +            }
 +            if( !list.contains( contributor ) ) {
 +              list.add( contributor );
 +            }
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void collectDefaultProviders( Map<String,List<ProviderDeploymentContributor>> defaults ) {
 +    for( ProviderDeploymentContributor contributor : PROVIDER_CONTRIBUTORS ) {
 +      String role = contributor.getRole();
 +      List<ProviderDeploymentContributor> list = defaults.get( role );
 +      if( list == null ) {
 +        list = new ArrayList<ProviderDeploymentContributor>();
 +        defaults.put( role, list );
 +      }
 +      if( list.isEmpty() ) {
 +        list.add( contributor );
 +      }
 +    }
 +  }
 +
 +  // Scan through the services in the topology.
 +  // For each that we find add it to the list of service roles included in the topology.
 +  private static Map<String,List<ServiceDeploymentContributor>> selectContextServices( Topology topology ) {
 +    Map<String,List<ServiceDeploymentContributor>> defaults
 +        = new HashMap<>();
 +    for( Service service : topology.getServices() ) {
 +      String role = service.getRole();
 +      ServiceDeploymentContributor contributor = getServiceContributor( role, service.getName(), service.getVersion() );
 +      if( contributor != null ) {
 +        List<ServiceDeploymentContributor> list = defaults.get( role );
 +        if( list == null ) {
 +          list = new ArrayList<ServiceDeploymentContributor>( 1 );
 +          defaults.put( role, list );
 +        }
 +        if( !list.contains( contributor ) ) {
 +          list.add( contributor );
 +        }
 +      }
 +    }
 +    return defaults;
 +  }
 +
 +  private static Map<String,ServiceDeploymentContributor> selectContextApplications(
 +      GatewayConfig config, Topology topology ) {
 +    Map<String,ServiceDeploymentContributor> contributors = new HashMap<>();
 +    if( topology != null ) {
 +      for( Application application : topology.getApplications() ) {
 +        String name = application.getName();
 +        if( name == null || name.isEmpty() ) {
 +          throw new DeploymentException( "Topologies cannot contain an application without a name." );
 +        }
 +        ApplicationDeploymentContributor contributor = new ApplicationDeploymentContributor( config, application );
 +        List<String> urls = application.getUrls();
 +        if( urls == null || urls.isEmpty() ) {
 +          urls = new ArrayList<String>( 1 );
 +          urls.add( "/" + name );
 +        }
 +        for( String url : urls ) {
 +          if( url == null || url.isEmpty() || url.equals( "/" ) ) {
 +            if( !topology.getServices().isEmpty() ) {
 +              throw new DeploymentException( String.format(
 +                  "Topologies with services cannot contain an application (%s) with a root url.", name ) );
 +            }
 +          }
 +          contributors.put( url, contributor );
 +        }
 +      }
 +    }
 +    return contributors;
 +  }
 +
 +  private static void initialize(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> applications ) {
 +    WebAppDescriptor wad = context.getWebAppDescriptor();
 +    String topoName = context.getTopology().getName();
 +    if( applications == null ) {
 +      String servletName = topoName + SERVLET_NAME_SUFFIX;
 +      wad.createServlet().servletName( servletName ).servletClass( GatewayServlet.class.getName() );
 +      wad.createServletMapping().servletName( servletName ).urlPattern( "/*" );
 +    } else {
 +      String filterName = topoName + FILTER_NAME_SUFFIX;
 +      wad.createFilter().filterName( filterName ).filterClass( GatewayServlet.class.getName() );
 +      wad.createFilterMapping().filterName( filterName ).urlPattern( "/*" );
 +    }
 +    if (gatewayServices != null) {
 +      gatewayServices.initializeContribution(context);
 +    } else {
 +      log.gatewayServicesNotInitialized();
 +    }
 +    initializeProviders( context, providers );
 +    initializeServices( context, services );
 +    initializeApplications( context, applications );
 +  }
 +
 +  private static void initializeProviders(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers ) {
 +    if( providers != null ) {
 +      for( Entry<String, List<ProviderDeploymentContributor>> entry : providers.entrySet() ) {
 +        for( ProviderDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            injectServices( contributor );
 +            log.initializeProvider( contributor.getName(), contributor.getRole() );
 +            contributor.initializeContribution( context );
 +          } catch( Exception e ) {
 +            log.failedToInitializeContribution( e );
 +            throw new DeploymentException( "Failed to initialize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void initializeServices( DeploymentContext context, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Entry<String, List<ServiceDeploymentContributor>> entry : services.entrySet() ) {
 +        for( ServiceDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            injectServices( contributor );
 +            log.initializeService( contributor.getName(), contributor.getRole() );
 +            contributor.initializeContribution( context );
 +          } catch( Exception e ) {
 +            log.failedToInitializeContribution( e );
 +            throw new DeploymentException( "Failed to initialize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void initializeApplications( DeploymentContext context, Map.Entry<String, ServiceDeploymentContributor> application ) {
 +    if( application != null ) {
 +      ServiceDeploymentContributor contributor = application.getValue();
 +      if( contributor != null ) {
 +        try {
 +          injectServices( contributor );
 +          log.initializeApplication( contributor.getName() );
 +          contributor.initializeContribution( context );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to initialize application contribution.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void injectServices(Object contributor) {
 +    if (gatewayServices != null) {
 +      Statement stmt = null;
 +      for(String serviceName : gatewayServices.getServiceNames()) {
 +
 +        try {
 +          // TODO: this is just a temporary injection solution
 +          // TODO: test for the existence of the setter before attempting it
 +          // TODO: avoid exception throwing when there is no setter
 +          stmt = new Statement(contributor, "set" + serviceName, new Object[]{gatewayServices.getService(serviceName)});
 +          stmt.execute();
 +        } catch (NoSuchMethodException e) {
 +          // TODO: eliminate the possibility of this being thrown up front
 +        } catch (Exception e) {
 +          // Maybe it makes sense to throw exception
 +          log.failedToInjectService( serviceName, e );
 +          throw new DeploymentException("Failed to inject service.", e);
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contribute(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> applications ) {
 +    Topology topology = context.getTopology();
 +    contributeProviders( context, topology, providers );
 +    contributeServices( context, topology, services );
 +    contributeApplications( context, topology, applications );
 +  }
 +
 +  private static void contributeProviders( DeploymentContext context, Topology topology, Map<String, List<ProviderDeploymentContributor>> providers ) {
 +    for( Provider provider : topology.getProviders() ) {
 +      ProviderDeploymentContributor contributor = getProviderContributor( providers, provider.getRole(), provider.getName() );
 +      if( contributor != null && provider.isEnabled() ) {
 +        try {
 +          log.contributeProvider( provider.getName(), provider.getRole() );
 +          contributor.contributeProvider( context, provider );
 +        } catch( Exception e ) {
 +          // Maybe it makes sense to throw exception
 +          log.failedToContributeProvider( provider.getName(), provider.getRole(), e );
 +          throw new DeploymentException("Failed to contribute provider.", e);
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contributeServices( DeploymentContext context, Topology topology, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Service service : topology.getServices() ) {
 +        ServiceDeploymentContributor contributor = getServiceContributor( service.getRole(), service.getName(), service.getVersion() );
 +        if( contributor != null ) {
 +          try {
 +            log.contributeService( service.getName(), service.getRole() );
 +            contributor.contributeService( context, service );
 +            if( gatewayServices != null ) {
 +              ServiceRegistry sr = gatewayServices.getService( GatewayServices.SERVICE_REGISTRY_SERVICE );
 +              if( sr != null ) {
 +                String regCode = sr.getRegistrationCode( topology.getName() );
 +                sr.registerService( regCode, topology.getName(), service.getRole(), service.getUrls() );
 +              }
 +            }
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToContributeService( service.getName(), service.getRole(), e );
 +            throw new DeploymentException( "Failed to contribute service.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void contributeApplications( DeploymentContext context, Topology topology, Map.Entry<String, ServiceDeploymentContributor> applications ) {
 +    if( applications != null ) {
 +      ServiceDeploymentContributor contributor = applications.getValue();
 +      if( contributor != null ) {
 +        try {
 +          log.contributeApplication( contributor.getName() );
 +          Application applicationDesc = topology.getApplication( applications.getKey() );
 +          contributor.contributeService( context, applicationDesc );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to contribution application.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  public static ProviderDeploymentContributor getProviderContributor( String role, String name ) {
 +    ProviderDeploymentContributor contributor = null;
 +    Map<String,ProviderDeploymentContributor> nameMap = PROVIDER_CONTRIBUTOR_MAP.get( role );
 +    if( nameMap != null ) {
 +      if( name != null ) {
 +        contributor = nameMap.get( name );
 +      } else if ( !nameMap.isEmpty() ) {
 +        contributor = nameMap.values().iterator().next();
 +      }
 +    }
 +    return contributor;
 +  }
 +
 +  public static ServiceDeploymentContributor getServiceContributor( String role, String name, Version version ) {
 +    ServiceDeploymentContributor contributor = null;
 +    Map<String,Map<Version, ServiceDeploymentContributor>> nameMap = SERVICE_CONTRIBUTOR_MAP.get( role );
 +    if( nameMap != null && !nameMap.isEmpty()) {
 +      Map<Version, ServiceDeploymentContributor> versionMap = null;
 +      if ( name == null ) {
 +        versionMap = nameMap.values().iterator().next();
 +      } else {
 +        versionMap = nameMap.get( name );
 +      }
 +      if ( versionMap != null && !versionMap.isEmpty()) {
 +        if( version == null ) {
 +          contributor = ((TreeMap<Version, ServiceDeploymentContributor>) versionMap).firstEntry().getValue();
 +        } else {
 +          contributor = versionMap.get( version );
 +        }
 +      }
 +    }
 +    return contributor;
 +  }
 +
 +  private static void finalize(
 +      DeploymentContext context,
 +      Map<String,List<ProviderDeploymentContributor>> providers,
 +      Map<String,List<ServiceDeploymentContributor>> services,
 +      Map.Entry<String,ServiceDeploymentContributor> application ) {
 +    try {
 +      // Write the gateway descriptor (gateway.xml) into the war.
 +      StringWriter writer = new StringWriter();
 +      GatewayDescriptorFactory.store( context.getGatewayDescriptor(), "xml", writer );
 +      context.getWebArchive().addAsWebInfResource(
 +          new StringAsset( writer.toString() ),
 +          GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +
 +      // Set the location of the gateway descriptor as a servlet init param.
 +      if( application == null ) {
 +        String servletName = context.getTopology().getName() + SERVLET_NAME_SUFFIX;
 +        ServletType<WebAppDescriptor> servlet = findServlet( context, servletName );
 +        // Coverity CID 1352314
 +        if( servlet == null ) {
 +          throw new DeploymentException( "Missing servlet " + servletName );
 +        } else {
 +          servlet.createInitParam()
 +              .paramName( GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_PARAM )
 +              .paramValue( "/WEB-INF/" + GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +        }
 +      } else {
 +        String servletName = context.getTopology().getName() + FILTER_NAME_SUFFIX;
 +        FilterType<WebAppDescriptor> filter = findFilter( context, servletName );
 +        // Coverity CID 1352313
 +        if( filter == null ) {
 +          throw new DeploymentException( "Missing filter " + servletName );
 +        } else {
 +          filter.createInitParam()
 +              .paramName( GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_PARAM )
 +              .paramValue( "/WEB-INF/" + GatewayServlet.GATEWAY_DESCRIPTOR_LOCATION_DEFAULT );
 +        }
 +      }
 +      if (gatewayServices != null) {
 +        gatewayServices.finalizeContribution(context);
 +      }
 +      finalizeProviders( context, providers );
 +      finalizeServices( context, services );
 +      finalizeApplications( context, application );
 +      writeDeploymentDescriptor( context, application != null );
 +    } catch ( IOException e ) {
 +      throw new RuntimeException( e );
 +    }
 +  }
 +
 +  private static void finalizeProviders( DeploymentContext context, Map<String, List<ProviderDeploymentContributor>> providers ) {
 +    if( providers != null ) {
 +      for( Entry<String, List<ProviderDeploymentContributor>> entry : providers.entrySet() ) {
 +        for( ProviderDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            log.finalizeProvider( contributor.getName(), contributor.getRole() );
 +            contributor.finalizeContribution( context );
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToFinalizeContribution( e );
 +            throw new DeploymentException( "Failed to finalize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void finalizeServices( DeploymentContext context, Map<String, List<ServiceDeploymentContributor>> services ) {
 +    if( services != null ) {
 +      for( Entry<String, List<ServiceDeploymentContributor>> entry : services.entrySet() ) {
 +        for( ServiceDeploymentContributor contributor : entry.getValue() ) {
 +          try {
 +            log.finalizeService( contributor.getName(), contributor.getRole() );
 +            contributor.finalizeContribution( context );
 +          } catch( Exception e ) {
 +            // Maybe it makes sense to throw exception
 +            log.failedToFinalizeContribution( e );
 +            throw new DeploymentException( "Failed to finalize contribution.", e );
 +          }
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void finalizeApplications( DeploymentContext context, Map.Entry<String, ServiceDeploymentContributor> application ) {
 +    if( application != null ) {
 +      ServiceDeploymentContributor contributor = application.getValue();
 +      if( contributor != null ) {
 +        try {
 +          log.finalizeApplication( contributor.getName() );
 +          contributor.finalizeContribution( context );
 +        } catch( Exception e ) {
 +          log.failedToInitializeContribution( e );
 +          throw new DeploymentException( "Failed to contribution application.", e );
 +        }
 +      }
 +    }
 +  }
 +
 +  private static void writeDeploymentDescriptor( DeploymentContext context, boolean override ) {
 +    // Write the web.xml into the war.
 +    Asset webXmlAsset = new StringAsset( context.getWebAppDescriptor().exportAsString() );
 +    if( override ) {
 +      context.getWebArchive().addAsWebInfResource( webXmlAsset, "override-web.xml" );
 +    } else {
 +      context.getWebArchive().setWebXML( webXmlAsset );
 +    }
 +  }
 +
 +  public static ServletType<WebAppDescriptor> findServlet( DeploymentContext context, String name ) {
 +    List<ServletType<WebAppDescriptor>> servlets = context.getWebAppDescriptor().getAllServlet();
 +    for( ServletType<WebAppDescriptor> servlet : servlets ) {
 +      if( name.equals( servlet.getServletName() ) ) {
 +        return servlet;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  public static FilterType<WebAppDescriptor> findFilter( DeploymentContext context, String name ) {
 +    List<FilterType<WebAppDescriptor>> filters = context.getWebAppDescriptor().getAllFilter();
 +    for( FilterType<WebAppDescriptor> filter : filters ) {
 +      if( name.equals( filter.getFilterName() ) ) {
 +        return filter;
 +      }
 +    }
 +    return null;
 +  }
 +
 +  private static void loadStacksServiceContributors( GatewayConfig config ) {
 +    String stacks = config.getGatewayServicesDir();
 +    log.usingServicesDirectory(stacks);
 +    File stacksDir = new File(stacks);
 +    Set<ServiceDeploymentContributor> deploymentContributors = ServiceDefinitionsLoader
 +        .loadServiceDefinitions(stacksDir);
 +    addServiceDeploymentContributors(deploymentContributors.iterator());
 +  }
 +
 +  private static void loadServiceContributors() {
 +    SERVICE_CONTRIBUTOR_MAP = new HashMap<>();
 +    ServiceLoader<ServiceDeploymentContributor> loader = ServiceLoader.load( ServiceDeploymentContributor.class );
 +    Iterator<ServiceDeploymentContributor> contributors = loader.iterator();
 +    addServiceDeploymentContributors(contributors);
 +  }
 +
 +   private static void addServiceDeploymentContributors(Iterator<ServiceDeploymentContributor> contributors) {
 +      while( contributors.hasNext() ) {
 +        ServiceDeploymentContributor contributor = contributors.next();
 +        if( contributor.getName() == null ) {
 +          log.ignoringServiceContributorWithMissingName( contributor.getClass().getName() );
 +          continue;
 +        }
 +        if( contributor.getRole() == null ) {
 +          log.ignoringServiceContributorWithMissingRole( contributor.getClass().getName() );
 +          continue;
 +        }
 +        if( contributor.getVersion() == null ) {
 +          log.ignoringServiceContributorWithMissingVersion(contributor.getClass().getName());
 +          continue;
 +        }
 +        Map<String,Map<Version, ServiceDeploymentContributor>> nameMap = SERVICE_CONTRIBUTOR_MAP.get( contributor.getRole() );
 +        if( nameMap == null ) {
 +          nameMap = new HashMap<>();
 +          SERVICE_CONTRIBUTOR_MAP.put( contributor.getRole(), nameMap );
 +        }
 +        Map<Version, ServiceDeploymentContributor> versionMap = nameMap.get(contributor.getName());
 +        if (versionMap == null) {
 +          versionMap = new TreeMap<>();
 +          nameMap.put(contributor.getName(), versionMap);
 +        }
 +        versionMap.put( contributor.getVersion(), contributor );
 +      }
 +   }
 +
 +   private static void loadProviderContributors() {
 +    Set<ProviderDeploymentContributor> set = new HashSet<>();
 +    Map<String,Map<String,ProviderDeploymentContributor>> roleMap
 +        = new HashMap<>();
 +
 +    ServiceLoader<ProviderDeploymentContributor> loader = ServiceLoader.load( ProviderDeploymentContributor.class );
 +    Iterator<ProviderDeploymentContributor> contributors = loader.iterator();
 +    while( contributors.hasNext() ) {
 +      ProviderDeploymentContributor contributor = contributors.next();
 +      if( contributor.getName() == null ) {
 +        log.ignoringProviderContributorWithMissingName( contributor.getClass().getName() );
 +        continue;
 +      }
 +      if( contributor.getRole() == null ) {
 +        log.ignoringProviderContributorWithMissingRole( contributor.getClass().getName() );
 +        continue;
 +      }
 +      set.add( contributor );
 +      Map nameMap = roleMap.get( contributor.getRole() );
 +      if( nameMap == null ) {
 +        nameMap = new HashMap<>();
 +        roleMap.put( contributor.getRole(), nameMap );
 +      }
 +      nameMap.put( contributor.getName(), contributor );
 +    }
 +    PROVIDER_CONTRIBUTORS = set;
 +    PROVIDER_CONTRIBUTOR_MAP = roleMap;
 +  }
 +
 +  static ProviderDeploymentContributor getProviderContributor(
 +      Map<String,List<ProviderDeploymentContributor>> providers, String role, String name ) {
 +    ProviderDeploymentContributor contributor = null;
 +    if( name == null ) {
 +      List<ProviderDeploymentContributor> list = providers.get( role );
 +      if( list != null && !list.isEmpty() ) {
 +        contributor = list.get( 0 );
 +      }
 +    } else {
 +      contributor = getProviderContributor( role, name );
 +      // Explicit configuration that is wrong should just fail
 +      // rather than randomly select a provider. Implicit default
 +      // providers can be selected when no name is provided.
 +      if (contributor == null || !contributor.getRole().equals(role) ||
 +          !contributor.getName().equals(name)) {
 +        throw new DeploymentException(
 +            "Failed to contribute provider. Role: " +
 +            role + " Name: " + name + ". Please check the topology for" +
 +                          " errors in name and role and that the provider is " +
 +                          "on the classpath.");
 +      }
 +    }
 +    return contributor;
 +  }
 +}
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/deploy/impl/ServiceDefinitionDeploymentContributor.java
index a056ac7,0000000..7e69af5
mode 100644,000000..100644
--- /dev/null
@@@ -1,256 -1,0 +1,264 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy.impl;
 +
 +import org.apache.knox.gateway.config.impl.GatewayConfigImpl;
 +import org.apache.knox.gateway.deploy.DeploymentContext;
 +import org.apache.knox.gateway.deploy.ServiceDeploymentContributorBase;
 +import org.apache.knox.gateway.descriptor.FilterDescriptor;
 +import org.apache.knox.gateway.descriptor.FilterParamDescriptor;
 +import org.apache.knox.gateway.descriptor.ResourceDescriptor;
 +import org.apache.knox.gateway.dispatch.GatewayDispatchFilter;
 +import org.apache.knox.gateway.filter.XForwardedHeaderFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.CookieScopeServletFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteRulesDescriptor;
 +import org.apache.knox.gateway.service.definition.CustomDispatch;
 +import org.apache.knox.gateway.service.definition.Policy;
 +import org.apache.knox.gateway.service.definition.Rewrite;
 +import org.apache.knox.gateway.service.definition.Route;
 +import org.apache.knox.gateway.service.definition.ServiceDefinition;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class ServiceDefinitionDeploymentContributor extends ServiceDeploymentContributorBase {
 +
 +  private static final String DISPATCH_ROLE = "dispatch";
 +
 +  private static final String DISPATCH_IMPL_PARAM = "dispatch-impl";
 +
 +  private static final String HTTP_CLIENT_FACTORY_PARAM = "httpClientFactory";
 +
 +  private static final String SERVICE_ROLE_PARAM = "serviceRole";
 +
 +  private static final String XFORWARDED_FILTER_NAME = "XForwardedHeaderFilter";
 +
 +  private static final String XFORWARDED_FILTER_ROLE = "xforwardedheaders";
 +
 +  private static final String DEFAULT_HA_DISPATCH_CLASS = "org.apache.knox.gateway.ha.dispatch.DefaultHaDispatch";
 +
 +  private static final String COOKIE_SCOPING_FILTER_NAME = "CookieScopeServletFilter";
 +
 +  private static final String COOKIE_SCOPING_FILTER_ROLE = "cookiescopef";
 +
 +  private ServiceDefinition serviceDefinition;
 +
 +  private UrlRewriteRulesDescriptor serviceRules;
 +
 +  public ServiceDefinitionDeploymentContributor(ServiceDefinition serviceDefinition, UrlRewriteRulesDescriptor serviceRules) {
 +    this.serviceDefinition = serviceDefinition;
 +    this.serviceRules = serviceRules;
 +  }
 +
 +  @Override
 +  public String getRole() {
 +    return serviceDefinition.getRole();
 +  }
 +
 +  @Override
 +  public String getName() {
 +    return serviceDefinition.getName();
 +  }
 +
 +  @Override
 +  public Version getVersion() {
 +    return new Version(serviceDefinition.getVersion());
 +  }
 +
 +  @Override
 +  public void contributeService(DeploymentContext context, Service service) throws Exception {
 +    contributeRewriteRules(context, service);
 +    contributeResources(context, service);
 +  }
 +
 +  private void contributeRewriteRules(DeploymentContext context, Service service) {
 +    if ( serviceRules != null ) {
 +      UrlRewriteRulesDescriptor clusterRules = context.getDescriptor("rewrite");
 +      clusterRules.addRules(serviceRules);
 +    }
 +  }
 +
 +  private void contributeResources(DeploymentContext context, Service service) {
 +    Map<String, String> filterParams = new HashMap<>();
 +    List<Route> bindings = serviceDefinition.getRoutes();
 +    for ( Route binding : bindings ) {
 +      List<Rewrite> filters = binding.getRewrites();
 +      if ( filters != null && !filters.isEmpty() ) {
 +        filterParams.clear();
 +        for ( Rewrite filter : filters ) {
 +          filterParams.put(filter.getTo(), filter.getApply());
 +        }
 +      }
 +      try {
 +        contributeResource(context, service, binding, filterParams);
 +      } catch ( URISyntaxException e ) {
 +        e.printStackTrace();
 +      }
 +    }
 +
 +  }
 +
 +  private void contributeResource(DeploymentContext context, Service service, Route binding, Map<String, String> filterParams) throws URISyntaxException {
 +    List<FilterParamDescriptor> params = new ArrayList<FilterParamDescriptor>();
 +    ResourceDescriptor resource = context.getGatewayDescriptor().addResource();
 +    resource.role(service.getRole());
 +    resource.pattern(binding.getPath());
 +    //add x-forwarded filter if enabled in config
 +    if (context.getGatewayConfig().isXForwardedEnabled()) {
 +      resource.addFilter().name(XFORWARDED_FILTER_NAME).role(XFORWARDED_FILTER_ROLE).impl(XForwardedHeaderFilter.class);
 +    }
 +    if (context.getGatewayConfig().isCookieScopingToPathEnabled()) {
 +      FilterDescriptor filter = resource.addFilter().name(COOKIE_SCOPING_FILTER_NAME).role(COOKIE_SCOPING_FILTER_ROLE).impl(CookieScopeServletFilter.class);
 +      filter.param().name(GatewayConfigImpl.HTTP_PATH).value(context.getGatewayConfig().getGatewayPath());
 +    }
 +    List<Policy> policyBindings = binding.getPolicies();
 +    if ( policyBindings == null ) {
 +      policyBindings = serviceDefinition.getPolicies();
 +    }
 +    if ( policyBindings == null ) {
 +      //add default set
 +      addDefaultPolicies(context, service, filterParams, params, resource);
 +    } else {
 +      addPolicies(context, service, filterParams, params, resource, policyBindings);
 +    }
 +    addDispatchFilter(context, service, resource, binding);
 +  }
 +
 +  private void addPolicies(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource, List<Policy> policyBindings) throws URISyntaxException {
 +    for ( Policy policyBinding : policyBindings ) {
 +      String role = policyBinding.getRole();
 +      if ( role == null ) {
 +        throw new IllegalArgumentException("Policy defined has no role for service " + service.getName());
 +      }
 +      role = role.trim().toLowerCase();
 +      if ( "rewrite".equals(role) ) {
 +        addRewriteFilter(context, service, filterParams, params, resource);
 +      } else if ( topologyContainsProviderType(context, role) ) {
 +        context.contributeFilter(service, resource, role, policyBinding.getName(), null);
 +      }
 +    }
 +  }
 +
 +  private void addDefaultPolicies(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource) throws URISyntaxException {
 +    addWebAppSecFilters(context, service, resource);
 +    addAuthenticationFilter(context, service, resource);
 +    addRewriteFilter(context, service, filterParams, params, resource);
 +    addIdentityAssertionFilter(context, service, resource);
 +    addAuthorizationFilter(context, service, resource);
 +  }
 +
 +  private void addRewriteFilter(DeploymentContext context, Service service, Map<String, String> filterParams, List<FilterParamDescriptor> params, ResourceDescriptor resource) throws URISyntaxException {
 +    if ( !filterParams.isEmpty() ) {
 +      for ( Map.Entry<String, String> filterParam : filterParams.entrySet() ) {
 +        params.add(resource.createFilterParam().name(filterParam.getKey()).value(filterParam.getValue()));
 +      }
 +    }
 +    addRewriteFilter(context, service, resource, params);
 +  }
 +
 +  private void addDispatchFilter(DeploymentContext context, Service service, ResourceDescriptor resource, Route binding) {
 +    CustomDispatch customDispatch = binding.getDispatch();
 +    if ( customDispatch == null ) {
 +      customDispatch = serviceDefinition.getDispatch();
 +    }
 +    boolean isHaEnabled = isHaEnabled(context);
 +    if ( customDispatch != null ) {
 +      String haContributorName = customDispatch.getHaContributorName();
 +      String haClassName = customDispatch.getHaClassName();
 +      String httpClientFactory = customDispatch.getHttpClientFactory();
++      boolean useTwoWaySsl = customDispatch.getUseTwoWaySsl();
 +      if ( isHaEnabled) {
 +        if (haContributorName != null) {
 +          addDispatchFilter(context, service, resource, DISPATCH_ROLE, haContributorName);
 +        } else if (haClassName != null) {
 -           addDispatchFilterForClass(context, service, resource, haClassName, httpClientFactory);
++          addDispatchFilterForClass(context, service, resource, haClassName, httpClientFactory, useTwoWaySsl);
 +        } else {
 +          addDefaultHaDispatchFilter(context, service, resource);
 +        }
 +      } else {
 +        String contributorName = customDispatch.getContributorName();
 +        if ( contributorName != null ) {
 +          addDispatchFilter(context, service, resource, DISPATCH_ROLE, contributorName);
 +        } else {
 +          String className = customDispatch.getClassName();
 +          if ( className != null ) {
 -             addDispatchFilterForClass(context, service, resource, className, httpClientFactory);
++            addDispatchFilterForClass(context, service, resource, className, httpClientFactory, useTwoWaySsl);
 +          } else {
 +            //final fallback to the default dispatch
 +            addDispatchFilter(context, service, resource, DISPATCH_ROLE, "http-client");
 +          }
 +        }
 +      }
 +    } else if (isHaEnabled) {
 +      addDefaultHaDispatchFilter(context, service, resource);
 +    } else {
 +      addDispatchFilter(context, service, resource, DISPATCH_ROLE, "http-client");
 +    }
 +  }
 +
 +  private void addDefaultHaDispatchFilter(DeploymentContext context, Service service, ResourceDescriptor resource) {
 +    FilterDescriptor filter = addDispatchFilterForClass(context, service, resource, DEFAULT_HA_DISPATCH_CLASS, null);
 +    filter.param().name(SERVICE_ROLE_PARAM).value(service.getRole());
 +  }
 +
 -   private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory) {
++  private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory, boolean useTwoWaySsl) {
 +    FilterDescriptor filter = resource.addFilter().name(getName()).role(DISPATCH_ROLE).impl(GatewayDispatchFilter.class);
 +    filter.param().name(DISPATCH_IMPL_PARAM).value(dispatchClass);
 +    if (httpClientFactory != null) {
 +      filter.param().name(HTTP_CLIENT_FACTORY_PARAM).value(httpClientFactory);
 +    }
++    // let's take the value of useTwoWaySsl which is derived from the service definition
++    // then allow it to be overridden by service params from the topology
++    filter.param().name("useTwoWaySsl").value(Boolean.toString(useTwoWaySsl));
 +    for ( Map.Entry<String, String> serviceParam : service.getParams().entrySet() ) {
 +      filter.param().name(serviceParam.getKey()).value(serviceParam.getValue());
 +    }
 +    if ( context.getGatewayConfig().isHadoopKerberosSecured() ) {
 +      filter.param().name("kerberos").value("true");
 +    } else {
 +      //TODO: [sumit] Get rid of special case. Add config/param capabilities to service definitions?
 +      //special case for hive
 +      filter.param().name("basicAuthPreemptive").value("true");
 +    }
 +    return filter;
 +  }
 +
++  private FilterDescriptor addDispatchFilterForClass(DeploymentContext context, Service service, ResourceDescriptor resource, String dispatchClass, String httpClientFactory) {
++    return addDispatchFilterForClass(context, service, resource, dispatchClass, httpClientFactory, false);
++  }
++
 +  private boolean isHaEnabled(DeploymentContext context) {
 +    Provider provider = getProviderByRole(context, "ha");
 +    if ( provider != null && provider.isEnabled() ) {
 +      Map<String, String> params = provider.getParams();
 +      if ( params != null ) {
 +        if ( params.containsKey(getRole()) ) {
 +          return true;
 +        }
 +      }
 +    }
 +    return false;
 +  }
 +
 +}
diff --cc gateway-server/src/main/java/org/apache/knox/gateway/services/metrics/impl/instr/InstrHttpClientBuilderProvider.java
index 073adcd,0000000..1299d6f
mode 100644,000000..100644
--- /dev/null
@@@ -1,71 -1,0 +1,70 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.services.metrics.impl.instr;
 +
 +import com.codahale.metrics.MetricRegistry;
 +import com.codahale.metrics.httpclient.HttpClientMetricNameStrategy;
 +import com.codahale.metrics.httpclient.InstrumentedHttpRequestExecutor;
 +import org.apache.knox.gateway.services.metrics.InstrumentationProvider;
 +import org.apache.knox.gateway.services.metrics.MetricsContext;
 +import org.apache.knox.gateway.services.metrics.impl.DefaultMetricsService;
 +import org.apache.http.Header;
 +import org.apache.http.HttpRequest;
 +import org.apache.http.RequestLine;
 +import org.apache.http.client.utils.URIBuilder;
 +import org.apache.http.impl.client.HttpClientBuilder;
 +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
 +
 +import java.net.URISyntaxException;
 +
 +public class InstrHttpClientBuilderProvider implements
 +    InstrumentationProvider<HttpClientBuilder> {
 +
 +  @Override
 +  public HttpClientBuilder getInstrumented(MetricsContext metricsContext) {
 +    MetricRegistry registry = (MetricRegistry) metricsContext.getProperty(DefaultMetricsService.METRICS_REGISTRY);
++    return  HttpClientBuilder.create().setRequestExecutor(new InstrumentedHttpRequestExecutor(registry, TOPOLOGY_URL_AND_METHOD));
 +  }
 +
 +  @Override
 +  public HttpClientBuilder getInstrumented(HttpClientBuilder instanceClass, MetricsContext metricsContext) {
 +    throw new UnsupportedOperationException();
 +  }
 +
 +  private static final HttpClientMetricNameStrategy TOPOLOGY_URL_AND_METHOD = new HttpClientMetricNameStrategy() {
 +    public String getNameFor(String name, HttpRequest request) {
 +      try {
 +        String context = "";
 +        Header header = request.getFirstHeader("X-Forwarded-Context");
 +        if (header != null) {
 +          context = header.getValue();
 +        }
 +        RequestLine requestLine = request.getRequestLine();
 +        URIBuilder uriBuilder = new URIBuilder(requestLine.getUri());
 +        String resourcePath = InstrUtils.getResourcePath(uriBuilder.removeQuery().build().toString());
 +        return MetricRegistry.name("service", new String[]{name, context + resourcePath, methodNameString(request)});
 +      } catch (URISyntaxException e) {
 +        throw new IllegalArgumentException(e);
 +      }
 +    }
 +
 +    private String methodNameString(HttpRequest request) {
 +      return request.getRequestLine().getMethod().toLowerCase() + "-requests";
 +    }
 +  };
 +}
index a1a2609,0000000..afeade0
mode 100644,000000..100644
--- /dev/null
@@@ -1,105 -1,0 +1,116 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.builder;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +
 +public class BeanPropertyTopologyBuilder implements TopologyBuilder {
 +
 +    private String name;
 +    private String defaultService;
++    private boolean isGenerated;
 +    private List<Provider> providers;
 +    private List<Service> services;
 +    private List<Application> applications;
 +
 +    public BeanPropertyTopologyBuilder() {
 +        providers = new ArrayList<Provider>();
 +        services = new ArrayList<Service>();
 +        applications = new ArrayList<Application>();
 +    }
 +
 +    public BeanPropertyTopologyBuilder name(String name) {
 +        this.name = name;
 +        return this;
 +    }
 +
 +    public String name() {
 +        return name;
 +    }
 +
++    public BeanPropertyTopologyBuilder generated(String isGenerated) {
++        this.isGenerated = Boolean.valueOf(isGenerated);
++        return this;
++    }
++
++    public boolean isGenerated() {
++        return isGenerated;
++    }
++
 +    public BeanPropertyTopologyBuilder defaultService(String defaultService) {
 +      this.defaultService = defaultService;
 +      return this;
 +    }
 +
 +    public String defaultService() {
 +      return defaultService;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addProvider(Provider provider) {
 +        providers.add(provider);
 +        return this;
 +    }
 +
 +    public List<Provider> providers() {
 +        return providers;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addService(Service service) {
 +        services.add(service);
 +        return this;
 +    }
 +
 +    public List<Service> services() {
 +        return services;
 +    }
 +
 +    public BeanPropertyTopologyBuilder addApplication( Application application ) {
 +        applications.add(application);
 +        return this;
 +    }
 +
 +    public List<Application> applications() {
 +        return applications;
 +    }
 +
 +    public Topology build() {
 +        Topology topology = new Topology();
 +        topology.setName(name);
 +        topology.setDefaultServicePath(defaultService);
++        topology.setGenerated(isGenerated);
 +
 +        for (Provider provider : providers) {
 +            topology.addProvider(provider);
 +        }
 +
 +        for (Service service : services) {
 +            topology.addService(service);
 +        }
 +
 +        for (Application application : applications) {
 +            topology.addApplication(application);
 +        }
 +
 +        return topology;
 +    }
 +}
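
A hedged usage sketch of the builder above, exercising the new generated flag; the topology name is illustrative, and the check relies only on the builder's own isGenerated() accessor.

import org.apache.knox.gateway.topology.Topology;
import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;

public class GeneratedTopologyBuilderSketch {
    public static void main(String[] args) {
        BeanPropertyTopologyBuilder builder = new BeanPropertyTopologyBuilder()
            .name("sandbox")      // illustrative topology name
            .generated("true");   // value as read from the <generated> element body

        // build() propagates the flag into the Topology via setGenerated(...).
        Topology topology = builder.build();

        System.out.println(builder.isGenerated()); // prints: true
    }
}
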
index 25997b1,0000000..7d25286
mode 100644,000000..100644
--- /dev/null
@@@ -1,48 -1,0 +1,58 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.util.List;
 +import java.util.Map;
 +
 +public interface SimpleDescriptor {
 +
 +    String getName();
 +
 +    String getDiscoveryType();
 +
 +    String getDiscoveryAddress();
 +
 +    String getDiscoveryUser();
 +
 +    String getDiscoveryPasswordAlias();
 +
 +    String getClusterName();
 +
 +    String getProviderConfig();
 +
 +    List<Service> getServices();
 +
++    List<Application> getApplications();
++
 +
 +    interface Service {
 +        String getName();
 +
 +        Map<String, String> getParams();
 +
 +        List<String> getURLs();
 +    }
++
++    interface Application {
++        String getName();
++
++        Map<String, String> getParams();
++
++        List<String> getURLs();
++    }
 +}
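
A hedged sketch of consuming a parsed descriptor through this interface, including the new getApplications() accessor. The descriptor path is illustrative, and SimpleDescriptorFactory.parse is assumed to be the parsing entry point, as it is in the handler and tests elsewhere in this change.

import org.apache.knox.gateway.topology.simple.SimpleDescriptor;
import org.apache.knox.gateway.topology.simple.SimpleDescriptorFactory;

public class SimpleDescriptorConsumerSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative path; JSON and YAML descriptors are both supported by the factory.
        SimpleDescriptor descriptor = SimpleDescriptorFactory.parse("conf/descriptors/myCluster.json");

        for (SimpleDescriptor.Service service : descriptor.getServices()) {
            System.out.println("service " + service.getName() + " urls=" + service.getURLs());
        }

        // New in this change: applications are exposed alongside services.
        for (SimpleDescriptor.Application app : descriptor.getApplications()) {
            System.out.println("application " + app.getName() + " urls=" + app.getURLs());
        }
    }
}
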
index b54432d,0000000..2e3214d
mode 100644,000000..100644
--- /dev/null
@@@ -1,267 -1,0 +1,316 @@@
-         ServiceDiscovery sd = ServiceDiscoveryFactory.get(desc.getDiscoveryType(), gatewayServices);
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.services.Service;
 +import org.apache.knox.gateway.topology.discovery.DefaultServiceDiscoveryConfig;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscovery;
 +import org.apache.knox.gateway.topology.discovery.ServiceDiscoveryFactory;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileWriter;
 +import java.io.InputStreamReader;
 +import java.io.IOException;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +
 +
 +/**
 + * Processes simple topology descriptors, producing full topology files, which can subsequently be deployed to the
 + * gateway.
 + */
 +public class SimpleDescriptorHandler {
 +
 +    private static final Service[] NO_GATEWAY_SERVICES = new Service[]{};
 +
 +    private static final SimpleDescriptorMessages log = MessagesFactory.get(SimpleDescriptorMessages.class);
 +
 +    public static Map<String, File> handle(File desc) throws IOException {
 +        return handle(desc, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, Service...gatewayServices) throws IOException {
 +        return handle(desc, desc.getParentFile(), gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory) throws IOException {
 +        return handle(desc, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(File desc, File destDirectory, Service...gatewayServices) throws IOException {
 +        return handle(SimpleDescriptorFactory.parse(desc.getAbsolutePath()), desc.getParentFile(), destDirectory, gatewayServices);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory) {
 +        return handle(desc, srcDirectory, destDirectory, NO_GATEWAY_SERVICES);
 +    }
 +
 +    public static Map<String, File> handle(SimpleDescriptor desc, File srcDirectory, File destDirectory, Service...gatewayServices) {
 +        Map<String, File> result = new HashMap<>();
 +
 +        File topologyDescriptor;
 +
 +        DefaultServiceDiscoveryConfig sdc = new DefaultServiceDiscoveryConfig(desc.getDiscoveryAddress());
 +        sdc.setUser(desc.getDiscoveryUser());
 +        sdc.setPasswordAlias(desc.getDiscoveryPasswordAlias());
++
++        // Use the discovery type from the descriptor; if it's unspecified, fall back to the default (AMBARI).
++        String discoveryType = desc.getDiscoveryType();
++        if (discoveryType == null) {
++            discoveryType = "AMBARI";
++        }
++
++        ServiceDiscovery sd = ServiceDiscoveryFactory.get(discoveryType, gatewayServices);
 +        ServiceDiscovery.Cluster cluster = sd.discover(sdc, desc.getClusterName());
 +
 +        List<String> validServiceNames = new ArrayList<>();
 +
 +        Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, List<String>>        serviceURLs   = new HashMap<>();
 +
 +        if (cluster != null) {
 +            for (SimpleDescriptor.Service descService : desc.getServices()) {
 +                String serviceName = descService.getName();
 +
 +                List<String> descServiceURLs = descService.getURLs();
 +                if (descServiceURLs == null || descServiceURLs.isEmpty()) {
 +                    descServiceURLs = cluster.getServiceURLs(serviceName);
 +                }
 +
 +                // Validate the discovered service URLs
 +                List<String> validURLs = new ArrayList<>();
 +                if (descServiceURLs != null && !descServiceURLs.isEmpty()) {
 +                    // Validate the URL(s)
 +                    for (String descServiceURL : descServiceURLs) {
 +                        if (validateURL(serviceName, descServiceURL)) {
 +                            validURLs.add(descServiceURL);
 +                        }
 +                    }
 +
 +                    if (!validURLs.isEmpty()) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +
 +                // If there is at least one valid URL associated with the service, then add it to the map
 +                if (!validURLs.isEmpty()) {
 +                    serviceURLs.put(serviceName, validURLs);
 +                } else {
 +                    log.failedToDiscoverClusterServiceURLs(serviceName, cluster.getName());
 +                }
 +
 +                // Service params
 +                if (descService.getParams() != null) {
 +                    serviceParams.put(serviceName, descService.getParams());
 +                    if (!validServiceNames.contains(serviceName)) {
 +                        validServiceNames.add(serviceName);
 +                    }
 +                }
 +            }
 +        } else {
 +            log.failedToDiscoverClusterServices(desc.getClusterName());
 +        }
 +
 +        BufferedWriter fw = null;
 +        topologyDescriptor = null;
 +        File providerConfig;
 +        try {
 +            // Verify that the referenced provider configuration exists before attempting to read it
 +            providerConfig = resolveProviderConfigurationReference(desc.getProviderConfig(), srcDirectory);
 +            if (providerConfig == null) {
 +                log.failedToResolveProviderConfigRef(desc.getProviderConfig());
 +                throw new IllegalArgumentException("Unresolved provider configuration reference: " +
 +                                                   desc.getProviderConfig() + " ; Topology update aborted!");
 +            }
 +            result.put("reference", providerConfig);
 +
 +            // TODO: Should the contents of the provider config be validated before incorporating it into the topology?
 +
 +            String topologyFilename = desc.getName();
 +            if (topologyFilename == null) {
 +                topologyFilename = desc.getClusterName();
 +            }
 +            topologyDescriptor = new File(destDirectory, topologyFilename + ".xml");
++
 +            fw = new BufferedWriter(new FileWriter(topologyDescriptor));
 +
++            fw.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
++
++            fw.write("<!--==============================================-->\n");
++            fw.write("<!-- DO NOT EDIT. This is an auto-generated file. -->\n");
++            fw.write("<!--==============================================-->\n");
++
 +            fw.write("<topology>\n");
 +
++            // KNOX-1105 Indicate that this topology was auto-generated
++            fw.write("    <generated>true</generated>\n");
++
 +            // Copy the externalized provider configuration content into the topology descriptor in-line
 +            InputStreamReader policyReader = new InputStreamReader(new FileInputStream(providerConfig));
 +            char[] buffer = new char[1024];
 +            int count;
 +            while ((count = policyReader.read(buffer)) > 0) {
 +                fw.write(buffer, 0, count);
 +            }
 +            policyReader.close();
 +
++            // Services
 +            // Sort the service names to write the services alphabetically
 +            List<String> serviceNames = new ArrayList<>(validServiceNames);
 +            Collections.sort(serviceNames);
 +
 +            // Write the service declarations
 +            for (String serviceName : serviceNames) {
 +                fw.write("    <service>\n");
 +                fw.write("        <role>" + serviceName + "</role>\n");
 +
 +                // URLs
 +                List<String> urls = serviceURLs.get(serviceName);
 +                if (urls != null) {
 +                    for (String url : urls) {
 +                        fw.write("        <url>" + url + "</url>\n");
 +                    }
 +                }
 +
 +                // Params
 +                Map<String, String> svcParams = serviceParams.get(serviceName);
 +                if (svcParams != null) {
 +                    for (String paramName : svcParams.keySet()) {
 +                        fw.write("        <param>\n");
 +                        fw.write("            <name>" + paramName + "</name>\n");
 +                        fw.write("            <value>" + svcParams.get(paramName) + "</value>\n");
 +                        fw.write("        </param>\n");
 +                    }
 +                }
 +
 +                fw.write("    </service>\n");
 +            }
 +
++            // Applications
++            List<SimpleDescriptor.Application> apps = desc.getApplications();
++            if (apps != null) {
++                for (SimpleDescriptor.Application app : apps) {
++                    fw.write("    <application>\n");
++                    fw.write("        <name>" + app.getName() + "</name>\n");
++
++                    // URLs
++                    List<String> urls = app.getURLs();
++                    if (urls != null) {
++                        for (String url : urls) {
++                            fw.write("        <url>" + url + "</url>\n");
++                        }
++                    }
++
++                    // Params
++                    Map<String, String> appParams = app.getParams();
++                    if (appParams != null) {
++                        for (String paramName : appParams.keySet()) {
++                            fw.write("        <param>\n");
++                            fw.write("            <name>" + paramName + "</name>\n");
++                            fw.write("            <value>" + appParams.get(paramName) + "</value>\n");
++                            fw.write("        </param>\n");
++                        }
++                    }
++
++                    fw.write("    </application>\n");
++                }
++            }
++
 +            fw.write("</topology>\n");
 +
 +            fw.flush();
 +        } catch (IOException e) {
 +            log.failedToGenerateTopologyFromSimpleDescriptor(topologyDescriptor.getName(), e);
 +            topologyDescriptor.delete();
 +        } finally {
 +            if (fw != null) {
 +                try {
 +                    fw.close();
 +                } catch (IOException e) {
 +                    // ignore
 +                }
 +            }
 +        }
 +
 +        result.put("topology", topologyDescriptor);
 +        return result;
 +    }
 +
++
 +    private static boolean validateURL(String serviceName, String url) {
 +        boolean result = false;
 +
 +        if (url != null && !url.isEmpty()) {
 +            try {
 +                new URI(url);
 +                result = true;
 +            } catch (URISyntaxException e) {
 +                log.serviceURLValidationFailed(serviceName, url, e);
 +            }
 +        }
 +
 +        return result;
 +    }
 +
 +
 +    private static File resolveProviderConfigurationReference(String reference, File srcDirectory) {
 +        File providerConfig;
 +
 +        // If the reference includes a path
 +        if (reference.contains(File.separator)) {
 +            // Check if it's an absolute path
 +            providerConfig = new File(reference);
 +            if (!providerConfig.exists()) {
 +                // If it's not an absolute path, try treating it as a relative path
 +                providerConfig = new File(srcDirectory, reference);
 +                if (!providerConfig.exists()) {
 +                    providerConfig = null;
 +                }
 +            }
 +        } else { // No file path, just a name
 +            // Check if it's co-located with the referencing descriptor
 +            providerConfig = new File(srcDirectory, reference);
 +            if (!providerConfig.exists()) {
 +                // Check the shared-providers config location
 +                File sharedProvidersDir = new File(srcDirectory, "../shared-providers");
 +                if (sharedProvidersDir.exists()) {
 +                    providerConfig = new File(sharedProvidersDir, reference);
 +                    if (!providerConfig.exists()) {
 +                        // Check if it's a valid name without the extension
 +                        providerConfig = new File(sharedProvidersDir, reference + ".xml");
 +                        if (!providerConfig.exists()) {
 +                            providerConfig = null;
 +                        }
 +                    }
 +                }
 +            }
 +        }
 +
 +        return providerConfig;
 +    }
 +
 +}
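
A hedged usage sketch for the handler above: turn a simple descriptor file into a deployable topology. The descriptor path is illustrative; per the code above, the single-argument handle() writes the generated topology next to the descriptor and returns the artifacts keyed as "topology" and "reference".

import java.io.File;
import java.util.Map;

import org.apache.knox.gateway.topology.simple.SimpleDescriptorHandler;

public class SimpleDescriptorHandlerSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative descriptor location.
        File descriptor = new File("conf/descriptors/myCluster.json");

        Map<String, File> artifacts = SimpleDescriptorHandler.handle(descriptor);

        File topologyXml = artifacts.get("topology");   // generated <name>.xml, now flagged <generated>true</generated>
        File providerCfg = artifacts.get("reference");  // the resolved provider configuration file

        System.out.println("topology: " + topologyXml + ", provider config: " + providerCfg);
    }
}
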
index 4eb1954,0000000..f3288fd
mode 100644,000000..100644
--- /dev/null
@@@ -1,123 -1,0 +1,163 @@@
-         result.addAll(services);
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import com.fasterxml.jackson.annotation.JsonProperty;
 +
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Map;
 +
 +class SimpleDescriptorImpl implements SimpleDescriptor {
 +
 +    @JsonProperty("discovery-type")
 +    private String discoveryType;
 +
 +    @JsonProperty("discovery-address")
 +    private String discoveryAddress;
 +
 +    @JsonProperty("discovery-user")
 +    private String discoveryUser;
 +
 +    @JsonProperty("discovery-pwd-alias")
 +    private String discoveryPasswordAlias;
 +
 +    @JsonProperty("provider-config-ref")
 +    private String providerConfig;
 +
 +    @JsonProperty("cluster")
 +    private String cluster;
 +
 +    @JsonProperty("services")
 +    private List<ServiceImpl> services;
 +
++    @JsonProperty("applications")
++    private List<ApplicationImpl> applications;
++
 +    private String name = null;
 +
 +    void setName(String name) {
 +        this.name = name;
 +    }
 +
 +    @Override
 +    public String getName() {
 +        return name;
 +    }
 +
 +    @Override
 +    public String getDiscoveryType() {
 +        return discoveryType;
 +    }
 +
 +    @Override
 +    public String getDiscoveryAddress() {
 +        return discoveryAddress;
 +    }
 +
 +    @Override
 +    public String getDiscoveryUser() {
 +        return discoveryUser;
 +    }
 +
 +    @Override
 +    public String getDiscoveryPasswordAlias() {
 +        return discoveryPasswordAlias;
 +    }
 +
 +    @Override
 +    public String getClusterName() {
 +        return cluster;
 +    }
 +
 +    @Override
 +    public String getProviderConfig() {
 +        return providerConfig;
 +    }
 +
 +    @Override
 +    public List<Service> getServices() {
 +        List<Service> result = new ArrayList<>();
++        if (services != null) {
++            result.addAll(services);
++        }
++        return result;
++    }
++
++    @Override
++    public List<Application> getApplications() {
++        List<Application> result = new ArrayList<>();
++        if (applications != null) {
++            result.addAll(applications);
++        }
 +        return result;
 +    }
 +
 +    public static class ServiceImpl implements Service {
 +        @JsonProperty("name")
 +        private String name;
 +
 +        @JsonProperty("params")
 +        private Map<String, String> params;
 +
 +        @JsonProperty("urls")
 +        private List<String> urls;
 +
 +        @Override
 +        public String getName() {
 +            return name;
 +        }
 +
 +        @Override
 +        public Map<String, String> getParams() {
 +            return params;
 +        }
 +
 +        @Override
 +        public List<String> getURLs() {
 +            return urls;
 +        }
 +    }
 +
++    public static class ApplicationImpl implements Application {
++        @JsonProperty("name")
++        private String name;
++
++        @JsonProperty("params")
++        private Map<String, String> params;
++
++        @JsonProperty("urls")
++        private List<String> urls;
++
++        @Override
++        public String getName() {
++            return name;
++        }
++
++        @Override
++        public Map<String, String> getParams() {
++            return params;
++        }
++
++        @Override
++        public List<String> getURLs() {
++            return urls;
++        }
++    }
++
 +}
index 81aedec,0000000..a1fcb6d
mode 100644,000000..100644
--- /dev/null
@@@ -1,95 -1,0 +1,97 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.xml;
 +
 +import org.apache.commons.digester3.Rule;
 +import org.apache.commons.digester3.binder.AbstractRulesModule;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Version;
 +import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;
 +import org.xml.sax.Attributes;
 +
 +public class KnoxFormatXmlTopologyRules extends AbstractRulesModule {
 +
 +  private static final String ROOT_TAG = "topology";
 +  private static final String NAME_TAG = "name";
 +  private static final String VERSION_TAG = "version";
 +  private static final String DEFAULT_SERVICE_TAG = "path";
++  private static final String GENERATED_TAG = "generated";
 +  private static final String APPLICATION_TAG = "application";
 +  private static final String SERVICE_TAG = "service";
 +  private static final String ROLE_TAG = "role";
 +  private static final String URL_TAG = "url";
 +  private static final String PROVIDER_TAG = "gateway/provider";
 +  private static final String ENABLED_TAG = "enabled";
 +  private static final String PARAM_TAG = "param";
 +  private static final String VALUE_TAG = "value";
 +
 +  private static final Rule paramRule = new ParamRule();
 +
 +  @Override
 +  protected void configure() {
 +    forPattern( ROOT_TAG ).createObject().ofType( BeanPropertyTopologyBuilder.class );
 +    forPattern( ROOT_TAG + "/" + NAME_TAG ).callMethod("name").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + VERSION_TAG ).callMethod("version").usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + DEFAULT_SERVICE_TAG ).callMethod("defaultService").usingElementBodyAsArgument();
++    forPattern( ROOT_TAG + "/" + GENERATED_TAG ).callMethod("generated").usingElementBodyAsArgument();
 +
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG ).createObject().ofType( Application.class ).then().setNext( "addApplication" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + APPLICATION_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG ).createObject().ofType( Service.class ).then().setNext( "addService" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + VERSION_TAG ).createObject().ofType(Version.class).then().setBeanProperty().then().setNext("setVersion");
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + URL_TAG ).callMethod( "addUrl" ).usingElementBodyAsArgument();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + SERVICE_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG ).createObject().ofType( Provider.class ).then().setNext( "addProvider" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ROLE_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + ENABLED_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG ).createObject().ofType( Param.class ).then().addRule( paramRule ).then().setNext( "addParam" );
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + NAME_TAG ).setBeanProperty();
 +    forPattern( ROOT_TAG + "/" + PROVIDER_TAG + "/" + PARAM_TAG + "/" + VALUE_TAG ).setBeanProperty();
 +  }
 +
 +  private static class ParamRule extends Rule {
 +
 +    @Override
 +    public void begin( String namespace, String name, Attributes attributes ) {
 +      Param param = getDigester().peek();
 +      String paramName = attributes.getValue( "name" );
 +      if( paramName != null ) {
 +        param.setName( paramName );
 +        param.setValue( attributes.getValue( "value" ) );
 +      }
 +    }
 +
 +  }
 +
 +}
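
A hedged sketch, assuming commons-digester3's DigesterLoader wiring (the gateway may load these rules differently in practice), showing the new <generated> element flowing through the rule added above into BeanPropertyTopologyBuilder. The topology content is illustrative.

import java.io.StringReader;

import org.apache.commons.digester3.Digester;
import org.apache.commons.digester3.binder.DigesterLoader;
import org.apache.knox.gateway.topology.Topology;
import org.apache.knox.gateway.topology.builder.BeanPropertyTopologyBuilder;
import org.apache.knox.gateway.topology.xml.KnoxFormatXmlTopologyRules;

public class GeneratedTagParseSketch {
    public static void main(String[] args) throws Exception {
        String xml =
            "<topology>\n" +
            "  <name>myCluster</name>\n" +
            "  <generated>true</generated>\n" +
            "</topology>\n";

        Digester digester = DigesterLoader.newLoader(new KnoxFormatXmlTopologyRules()).newDigester();

        // The root rule creates a BeanPropertyTopologyBuilder, so it is the parse result.
        BeanPropertyTopologyBuilder builder = digester.parse(new StringReader(xml));
        Topology topology = builder.build(); // build() applies setGenerated(true) on the Topology

        System.out.println(builder.isGenerated()); // prints: true
    }
}
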
index 41a7c10,0000000..df31f3d
mode 100644,000000..100644
--- /dev/null
@@@ -1,422 -1,0 +1,681 @@@
-         final String   discoveryUser    = "admin";
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements. See the NOTICE file distributed with this
 + * work for additional information regarding copyright ownership. The ASF
 + * licenses this file to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance with the License.
 + * You may obtain a copy of the License at
 + *
 + * http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 + * License for the specific language governing permissions and limitations under
 + * the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import java.io.File;
 +import java.io.FileWriter;
 +import java.io.Writer;
 +import java.util.*;
 +
 +import org.junit.Test;
 +import static org.junit.Assert.*;
 +
 +
 +public class SimpleDescriptorFactoryTest {
 +
++    private enum FileType {
++        JSON,
++        YAML
++    }
 +
 +    @Test
 +    public void testParseJSONSimpleDescriptor() throws Exception {
++        testParseSimpleDescriptor(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptor() throws Exception {
++        testParseSimpleDescriptor(FileType.YAML);
++    }
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
++        testParseSimpleDescriptorWithServiceParams(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++        testParseSimpleDescriptorWithServiceParams(FileType.YAML);
++    }
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithApplications() throws Exception {
++        testParseSimpleDescriptorWithApplications(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithApplications() throws Exception {
++        testParseSimpleDescriptorWithApplications(FileType.YAML);
++    }
++
++
++    @Test
++    public void testParseJSONSimpleDescriptorWithServicesAndApplications() throws Exception {
++        testParseSimpleDescriptorWithServicesAndApplications(FileType.JSON);
++    }
++
++    @Test
++    public void testParseYAMLSimpleDescriptorWithServicesAndApplications() throws Exception {
++        testParseSimpleDescriptorWithServicesAndApplications(FileType.YAML);
++    }
++
 +
++    private void testParseSimpleDescriptor(FileType type) throws Exception {
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        final String   discoveryUser    = "joeblow";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         String fileName = "test-topology.json";
-         File testJSON = null;
++        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
-             testJSON = writeJSON(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             if (testJSON != null) {
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +        } finally {
-                     testJSON.delete();
++            if (testFile != null) {
 +                try {
-     @Test
-     public void testParseJSONSimpleDescriptorWithServiceParams() throws Exception {
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-         String fileName = "test-topology.json";
-         File testJSON = null;
++    private void testParseSimpleDescriptorWithServiceParams(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
 +        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
 +        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
-             testJSON = writeJSON(fileName,
-                                  discoveryType,
-                                  discoveryAddress,
-                                  discoveryUser,
-                                  providerConfig,
-                                  clusterName,
-                                  services,
-                                  serviceParams);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testJSON.getAbsolutePath());
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-         } catch (Exception e) {
-             e.printStackTrace();
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services,
++                                           serviceParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
 +            validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
-             if (testJSON != null) {
 +        } finally {
-                     testJSON.delete();
++            if (testFile != null) {
 +                try {
-     @Test
-     public void testParseYAMLSimpleDescriptor() throws Exception {
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-         final String   discoveryUser    = "joeblow";
++    private void testParseSimpleDescriptorWithApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         final Map<String, List<String>> services = new HashMap<>();
-         services.put("NODEMANAGER", null);
-         services.put("JOBTRACKER", null);
-         services.put("RESOURCEMANAGER", null);
-         services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
-         String fileName = "test-topology.yml";
-         File testYAML = null;
++        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
-             testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
-             validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services);
-         } catch (Exception e) {
-             e.printStackTrace();
++        final Map<String, List<String>> apps = new HashMap<>();
++        apps.put("app-one", null);
++        apps.put("appTwo", null);
++        apps.put("thirdApps", null);
++        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
++        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
++
++        final Map<String, Map<String, String>> appParams = new HashMap<>();
++        Map<String, String> oneParams = new HashMap<>();
++        oneParams.put("appone.cookie.secure.only", "true");
++        oneParams.put("appone.token.ttl", "100000");
++        appParams.put("app-one", oneParams);
++        Map<String, String> fiveParams = new HashMap<>();
++        fiveParams.put("myproperty", "true");
++        fiveParams.put("anotherparam", "100000");
++        appParams.put("AppFive", fiveParams);
++
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             if (testYAML != null) {
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           null,
++                                           null,
++                                           apps,
++                                           appParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
++            validateSimpleDescriptor(sd,
++                                     discoveryType,
++                                     discoveryAddress,
++                                     providerConfig,
++                                     clusterName,
++                                     null,
++                                     null,
++                                     apps,
++                                     appParams);
 +        } finally {
-                     testYAML.delete();
++            if (testFile != null) {
 +                try {
-     @Test
-     public void testParseYAMLSimpleDescriptorWithServiceParams() throws Exception {
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-         final String   discoveryUser    = "joeblow";
++    private void testParseSimpleDescriptorWithServicesAndApplications(FileType type) throws Exception {
 +
 +        final String   discoveryType    = "AMBARI";
 +        final String   discoveryAddress = "http://c6401.ambari.apache.org:8080";
-         services.put("AMBARIUI", Arrays.asList("http://c6401.ambari.apache.org:8080"));
++        final String   discoveryUser    = "admin";
 +        final String   providerConfig   = "ambari-cluster-policy.xml";
 +        final String   clusterName      = "myCluster";
 +
 +        final Map<String, List<String>> services = new HashMap<>();
 +        services.put("NODEMANAGER", null);
 +        services.put("JOBTRACKER", null);
 +        services.put("RESOURCEMANAGER", null);
 +        services.put("HIVE", Arrays.asList("http://c6401.ambari.apache.org", "http://c6402.ambari.apache.org", "http://c6403.ambari.apache.org"));
-         String fileName = "test-topology.yml";
-         File testYAML = null;
++        services.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        services.put("KNOXSSO", null);
 +        services.put("KNOXTOKEN", null);
 +        services.put("CustomRole", Collections.singletonList("http://c6402.ambari.apache.org:1234"));
 +
 +        final Map<String, Map<String, String>> serviceParams = new HashMap<>();
 +        Map<String, String> knoxSSOParams = new HashMap<>();
 +        knoxSSOParams.put("knoxsso.cookie.secure.only", "true");
 +        knoxSSOParams.put("knoxsso.token.ttl", "100000");
 +        serviceParams.put("KNOXSSO", knoxSSOParams);
 +
 +        Map<String, String> knoxTokenParams = new HashMap<>();
 +        knoxTokenParams.put("knox.token.ttl", "36000000");
 +        knoxTokenParams.put("knox.token.audiences", "tokenbased");
 +        knoxTokenParams.put("knox.token.target.url", "https://localhost:8443/gateway/tokenbased");
 +        serviceParams.put("KNOXTOKEN", knoxTokenParams);
 +
 +        Map<String, String> customRoleParams = new HashMap<>();
 +        customRoleParams.put("custom.param.1", "value1");
 +        customRoleParams.put("custom.param.2", "value2");
 +        serviceParams.put("CustomRole", customRoleParams);
 +
-             testYAML = writeYAML(fileName, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, serviceParams);
-             SimpleDescriptor sd = SimpleDescriptorFactory.parse(testYAML.getAbsolutePath());
-             validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, services, serviceParams);
-         } catch (Exception e) {
-             e.printStackTrace();
++        final Map<String, List<String>> apps = new HashMap<>();
++        apps.put("app-one", null);
++        apps.put("appTwo", null);
++        apps.put("thirdApps", null);
++        apps.put("appfour", Arrays.asList("http://host1:1234", "http://host2:5678", "http://host1:1357"));
++        apps.put("AppFive", Collections.singletonList("http://host5:8080"));
++
++        final Map<String, Map<String, String>> appParams = new HashMap<>();
++        Map<String, String> oneParams = new HashMap<>();
++        oneParams.put("appone.cookie.secure.only", "true");
++        oneParams.put("appone.token.ttl", "100000");
++        appParams.put("app-one", oneParams);
++        Map<String, String> fiveParams = new HashMap<>();
++        fiveParams.put("myproperty", "true");
++        fiveParams.put("anotherparam", "100000");
++        appParams.put("AppFive", fiveParams);
++
++        String fileName = "test-topology." + getFileExtensionForType(type);
++        File testFile = null;
 +        try {
-             if (testYAML != null) {
++            testFile = writeDescriptorFile(type,
++                                           fileName,
++                                           discoveryType,
++                                           discoveryAddress,
++                                           discoveryUser,
++                                           providerConfig,
++                                           clusterName,
++                                           services,
++                                           serviceParams,
++                                           apps,
++                                           appParams);
++            SimpleDescriptor sd = SimpleDescriptorFactory.parse(testFile.getAbsolutePath());
++            validateSimpleDescriptor(sd,
++                                     discoveryType,
++                                     discoveryAddress,
++                                     providerConfig,
++                                     clusterName,
++                                     services,
++                                     serviceParams,
++                                     apps,
++                                     appParams);
 +        } finally {
-                     testYAML.delete();
++            if (testFile != null) {
 +                try {
-     private void validateSimpleDescriptor(SimpleDescriptor          sd,
-                                           String                    discoveryType,
-                                           String                    discoveryAddress,
-                                           String                    providerConfig,
-                                           String                    clusterName,
-                                           Map<String, List<String>> expectedServices) {
-         validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
-     }
-     private void validateSimpleDescriptor(SimpleDescriptor                 sd,
-                                           String                           discoveryType,
-                                           String                           discoveryAddress,
-                                           String                           providerConfig,
-                                           String                           clusterName,
-                                           Map<String, List<String>>        expectedServices,
-                                           Map<String, Map<String, String>> expectedServiceParameters) {
-         assertNotNull(sd);
-         assertEquals(discoveryType, sd.getDiscoveryType());
-         assertEquals(discoveryAddress, sd.getDiscoveryAddress());
-         assertEquals(providerConfig, sd.getProviderConfig());
-         assertEquals(clusterName, sd.getClusterName());
-         List<SimpleDescriptor.Service> actualServices = sd.getServices();
-         assertEquals(expectedServices.size(), actualServices.size());
-         for (SimpleDescriptor.Service actualService : actualServices) {
-             assertTrue(expectedServices.containsKey(actualService.getName()));
-             assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
-             // Validate service parameters
-             if (expectedServiceParameters != null) {
-                 if (expectedServiceParameters.containsKey(actualService.getName())) {
-                     Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
-                     Map<String, String> actualServiceParams = actualService.getParams();
-                     assertNotNull(actualServiceParams);
-                     // Validate the size of the service parameter set
-                     assertEquals(expectedParams.size(), actualServiceParams.size());
-                     // Validate the parameter contents
-                     for (String paramName : actualServiceParams.keySet()) {
-                         assertTrue(expectedParams.containsKey(paramName));
-                         assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
-                     }
-                 }
-             }
++                    testFile.delete();
 +                } catch (Exception e) {
 +                    // Ignore
 +                }
 +            }
 +        }
 +    }
 +
-     private File writeJSON(String path, String content) throws Exception {
-         File f = new File(path);
-         Writer fw = new FileWriter(f);
-         fw.write(content);
-         fw.flush();
-         fw.close();
-         return f;
++    private String getFileExtensionForType(FileType type) {
++        String extension = null;
++        switch (type) {
++            case JSON:
++                extension = "json";
++                break;
++            case YAML:
++                extension = "yml";
++                break;
 +        }
++        return extension;
 +    }
 +
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services) throws Exception {
++        return writeDescriptorFile(type,
++                                   path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   null);
++    }
 +
-     private File writeJSON(String path,
-                            String discoveryType,
-                            String discoveryAddress,
-                            String discoveryUser,
-                            String providerConfig,
-                            String clusterName,
-                            Map<String, List<String>> services) throws Exception {
-         return writeJSON(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services,
++                                     Map<String, Map<String, String>> serviceParams) throws Exception {
++        return writeDescriptorFile(type,
++                                   path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   null,
++                                   null);
 +    }
 +
 +
-                            Map<String, Map<String, String>> serviceParams) throws Exception {
++    private File writeDescriptorFile(FileType type,
++                                     String                           path,
++                                     String                           discoveryType,
++                                     String                           discoveryAddress,
++                                     String                           discoveryUser,
++                                     String                           providerConfig,
++                                     String                           clusterName,
++                                     Map<String, List<String>>        services,
++                                     Map<String, Map<String, String>> serviceParams,
++                                     Map<String, List<String>>        apps,
++                                     Map<String, Map<String, String>> appParams) throws Exception {
++        File result = null;
++        switch (type) {
++            case JSON:
++                result = writeJSON(path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   apps,
++                                   appParams);
++                break;
++            case YAML:
++                result = writeYAML(path,
++                                   discoveryType,
++                                   discoveryAddress,
++                                   discoveryUser,
++                                   providerConfig,
++                                   clusterName,
++                                   services,
++                                   serviceParams,
++                                   apps,
++                                   appParams);
++                break;
++        }
++        return result;
 +    }
 +
++
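 +    // Writes a JSON simple descriptor by hand, including the discovery details, provider config reference, services, and applications.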
 +    private File writeJSON(String path,
 +                           String discoveryType,
 +                           String discoveryAddress,
 +                           String discoveryUser,
 +                           String providerConfig,
 +                           String clusterName,
 +                           Map<String, List<String>> services,
-         fw.write("\"cluster\":\"" + clusterName + "\",\n");
-         fw.write("\"services\":[\n");
-         int i = 0;
-         for (String name : services.keySet()) {
-             fw.write("{\"name\":\"" + name + "\"");
-             // Service params
-             if (serviceParams != null && !serviceParams.isEmpty()) {
-                 Map<String, String> params = serviceParams.get(name);
-                 if (params != null && !params.isEmpty()) {
-                     fw.write(",\n\"params\":{\n");
-                     Iterator<String> paramNames = params.keySet().iterator();
-                     while (paramNames.hasNext()) {
-                         String paramName = paramNames.next();
-                         String paramValue = params.get(paramName);
-                         fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
-                         fw.write(paramNames.hasNext() ? ",\n" : "");
-                     }
-                     fw.write("\n}");
-                 }
-             }
++                           Map<String, Map<String, String>> serviceParams,
++                           Map<String, List<String>> apps,
++                           Map<String, Map<String, String>> appParams) throws Exception {
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("{" + "\n");
 +        fw.write("\"discovery-type\":\"" + discoveryType + "\",\n");
 +        fw.write("\"discovery-address\":\"" + discoveryAddress + "\",\n");
 +        fw.write("\"discovery-user\":\"" + discoveryUser + "\",\n");
 +        fw.write("\"provider-config-ref\":\"" + providerConfig + "\",\n");
-             // Service URLs
-             List<String> urls = services.get(name);
-             if (urls != null) {
-                 fw.write(",\n\"urls\":[");
-                 Iterator<String> urlIter = urls.iterator();
-                 while (urlIter.hasNext()) {
-                     fw.write("\"" + urlIter.next() + "\"");
-                     if (urlIter.hasNext()) {
-                         fw.write(", ");
-                     }
-                 }
-                 fw.write("]\n");
-             }
++        fw.write("\"cluster\":\"" + clusterName + "\"");
 +
-             fw.write("}");
-             if (i++ < services.size() - 1) {
-                 fw.write(",");
-             }
-             fw.write("\n");
++        if (services != null && !services.isEmpty()) {
++            fw.write(",\n\"services\":[\n");
++            writeServiceOrApplicationJSON(fw, services, serviceParams);
++            fw.write("]\n");
++        }
 +
-         fw.write("]\n");
++        if (apps != null && !apps.isEmpty()) {
++            fw.write(",\n\"applications\":[\n");
++            writeServiceOrApplicationJSON(fw, apps, appParams);
++            fw.write("]\n");
 +        }
-     private File writeYAML(String                    path,
-                            String                    discoveryType,
-                            String                    discoveryAddress,
-                            String                    discoveryUser,
-                            String                    providerConfig,
-                            String                    clusterName,
-                            Map<String, List<String>> services) throws Exception {
-         return writeYAML(path, discoveryType, discoveryAddress, discoveryUser, providerConfig, clusterName, services, null);
-     }
++
 +        fw.write("}\n");
 +        fw.flush();
 +        fw.close();
 +
 +        return f;
 +    }
 +
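++    // Writes the JSON array entries for either services or applications, including any params and URLs declared for each entry.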
++    private void writeServiceOrApplicationJSON(Writer fw,
++                                               Map<String, List<String>> elementURLs,
++                                               Map<String, Map<String, String>> elementParams) throws Exception {
++        if (elementURLs != null) {
++            int i = 0;
++            for (String name : elementURLs.keySet()) {
++                fw.write("{\"name\":\"" + name + "\"");
++
++                // Service or application params
++                if (elementParams != null && !elementParams.isEmpty()) {
++                    Map<String, String> params = elementParams.get(name);
++                    if (params != null && !params.isEmpty()) {
++                        fw.write(",\n\"params\":{\n");
++                        Iterator<String> paramNames = params.keySet().iterator();
++                        while (paramNames.hasNext()) {
++                            String paramName = paramNames.next();
++                            String paramValue = params.get(paramName);
++                            fw.write("\"" + paramName + "\":\"" + paramValue + "\"");
++                            fw.write(paramNames.hasNext() ? ",\n" : "");
++                        }
++                        fw.write("\n}");
++                    }
++                }
 +
-                            Map<String, Map<String, String>> serviceParams) throws Exception {
++                // Service or application URLs
++                List<String> urls = elementURLs.get(name);
++                if (urls != null) {
++                    fw.write(",\n\"urls\":[");
++                    Iterator<String> urlIter = urls.iterator();
++                    while (urlIter.hasNext()) {
++                        fw.write("\"" + urlIter.next() + "\"");
++                        if (urlIter.hasNext()) {
++                            fw.write(", ");
++                        }
++                    }
++                    fw.write("]\n");
++                }
 +
++                fw.write("}");
++                if (i++ < elementURLs.size() - 1) {
++                    fw.write(",");
++                }
++                fw.write("\n");
++            }
++        }
++    }
 +
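 +    // Writes a YAML simple descriptor equivalent to the JSON form, including services and applications when present.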
 +    private File writeYAML(String                           path,
 +                           String                           discoveryType,
 +                           String                           discoveryAddress,
 +                           String                           discoveryUser,
 +                           String                           providerConfig,
 +                           String                           clusterName,
 +                           Map<String, List<String>>        services,
-         fw.write("services:\n");
-         for (String name : services.keySet()) {
++                           Map<String, Map<String, String>> serviceParams,
++                           Map<String, List<String>>        apps,
++                           Map<String, Map<String, String>> appParams) throws Exception {
++
 +        File f = new File(path);
 +
 +        Writer fw = new FileWriter(f);
 +        fw.write("---" + "\n");
 +        fw.write("discovery-type: " + discoveryType + "\n");
 +        fw.write("discovery-address: " + discoveryAddress + "\n");
 +        fw.write("discovery-user: " + discoveryUser + "\n");
 +        fw.write("provider-config-ref: " + providerConfig + "\n");
 +        fw.write("cluster: " + clusterName+ "\n");
-             if (serviceParams != null && !serviceParams.isEmpty()) {
-                 if (serviceParams.containsKey(name)) {
-                     Map<String, String> params = serviceParams.get(name);
++
++        if (services != null && !services.isEmpty()) {
++            fw.write("services:\n");
++            writeServiceOrApplicationYAML(fw, services, serviceParams);
++        }
++
++        if (apps != null && !apps.isEmpty()) {
++            fw.write("applications:\n");
++            writeServiceOrApplicationYAML(fw, apps, appParams);
++        }
++
++        fw.flush();
++        fw.close();
++
++        return f;
++    }
++
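++    // Writes the YAML list entries for either services or applications, including any params and URLs declared for each entry.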
++    private void writeServiceOrApplicationYAML(Writer                           fw,
++                                               Map<String, List<String>>        elementURLs,
++                                               Map<String, Map<String, String>> elementParams) throws Exception {
++        for (String name : elementURLs.keySet()) {
 +            fw.write("    - name: " + name + "\n");
 +
 +            // Service or application params
-             List<String> urls = services.get(name);
++            if (elementParams != null && !elementParams.isEmpty()) {
++                if (elementParams.containsKey(name)) {
++                    Map<String, String> params = elementParams.get(name);
 +                    fw.write("      params:\n");
 +                    for (String paramName : params.keySet()) {
 +                        fw.write("            " + paramName + ": " + params.get(paramName) + "\n");
 +                    }
 +                }
 +            }
 +
 +            // Service or application URLs
-         fw.flush();
-         fw.close();
++            List<String> urls = elementURLs.get(name);
 +            if (urls != null) {
 +                fw.write("      urls:\n");
 +                for (String url : urls) {
 +                    fw.write("          - " + url + "\n");
 +                }
 +            }
 +        }
-         return f;
++    }
 +
++
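++    // Validation helpers: assert that the parsed SimpleDescriptor matches the expected discovery details, services, applications, and params.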
++    private void validateSimpleDescriptor(SimpleDescriptor          sd,
++                                          String                    discoveryType,
++                                          String                    discoveryAddress,
++                                          String                    providerConfig,
++                                          String                    clusterName,
++                                          Map<String, List<String>> expectedServices) {
++        validateSimpleDescriptor(sd, discoveryType, discoveryAddress, providerConfig, clusterName, expectedServices, null);
++    }
++
++
++    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
++                                          String                           discoveryType,
++                                          String                           discoveryAddress,
++                                          String                           providerConfig,
++                                          String                           clusterName,
++                                          Map<String, List<String>>        expectedServices,
++                                          Map<String, Map<String, String>> expectedServiceParameters) {
++        validateSimpleDescriptor(sd,
++                                 discoveryType,
++                                 discoveryAddress,
++                                 providerConfig,
++                                 clusterName,
++                                 expectedServices,
++                                 expectedServiceParameters,
++                                 null,
++                                 null);
 +    }
 +
++    private void validateSimpleDescriptor(SimpleDescriptor                 sd,
++                                          String                           discoveryType,
++                                          String                           discoveryAddress,
++                                          String                           providerConfig,
++                                          String                           clusterName,
++                                          Map<String, List<String>>        expectedServices,
++                                          Map<String, Map<String, String>> expectedServiceParameters,
++                                          Map<String, List<String>>        expectedApps,
++                                          Map<String, Map<String, String>> expectedAppParameters) {
++        assertNotNull(sd);
++        assertEquals(discoveryType, sd.getDiscoveryType());
++        assertEquals(discoveryAddress, sd.getDiscoveryAddress());
++        assertEquals(providerConfig, sd.getProviderConfig());
++        assertEquals(clusterName, sd.getClusterName());
++
++        List<SimpleDescriptor.Service> actualServices = sd.getServices();
++
++        if (expectedServices == null) {
++            assertTrue(actualServices.isEmpty());
++        } else {
++            assertEquals(expectedServices.size(), actualServices.size());
++
++            for (SimpleDescriptor.Service actualService : actualServices) {
++                assertTrue(expectedServices.containsKey(actualService.getName()));
++                assertEquals(expectedServices.get(actualService.getName()), actualService.getURLs());
++
++                // Validate service parameters
++                if (expectedServiceParameters != null) {
++                    if (expectedServiceParameters.containsKey(actualService.getName())) {
++                        Map<String, String> expectedParams = expectedServiceParameters.get(actualService.getName());
++
++                        Map<String, String> actualServiceParams = actualService.getParams();
++                        assertNotNull(actualServiceParams);
++
++                        // Validate the size of the service parameter set
++                        assertEquals(expectedParams.size(), actualServiceParams.size());
++
++                        // Validate the parameter contents
++                        for (String paramName : actualServiceParams.keySet()) {
++                            assertTrue(expectedParams.containsKey(paramName));
++                            assertEquals(expectedParams.get(paramName), actualServiceParams.get(paramName));
++                        }
++                    }
++                }
++            }
++        }
++
++        List<SimpleDescriptor.Application> actualApps = sd.getApplications();
++
++        if (expectedApps == null) {
++            assertTrue(actualApps.isEmpty());
++        } else {
++            assertEquals(expectedApps.size(), actualApps.size());
++
++            for (SimpleDescriptor.Application actualApp : actualApps) {
++                assertTrue(expectedApps.containsKey(actualApp.getName()));
++                assertEquals(expectedApps.get(actualApp.getName()), actualApp.getURLs());
++
++                // Validate application parameters
++                if (expectedAppParameters != null) {
++                    if (expectedAppParameters.containsKey(actualApp.getName())) {
++                        Map<String, String> expectedParams = expectedAppParameters.get(actualApp.getName());
++
++                        Map<String, String> actualAppParams = actualApp.getParams();
++                        assertNotNull(actualAppParams);
++
++                        // Validate the size of the application parameter set
++                        assertEquals(expectedParams.size(), actualAppParams.size());
++
++                        // Validate the parameter contents
++                        for (String paramName : actualAppParams.keySet()) {
++                            assertTrue(expectedParams.containsKey(paramName));
++                            assertEquals(expectedParams.get(paramName), actualAppParams.get(paramName));
++                        }
++                    }
++                }
++            }
++        }
++    }
 +
 +}
index f40fad7,0000000..575b68a
mode 100644,000000..100644
--- /dev/null
@@@ -1,447 -1,0 +1,455 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology.simple;
 +
 +import org.apache.knox.gateway.topology.validation.TopologyValidator;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import java.io.ByteArrayInputStream;
 +import java.io.File;
 +import java.io.FileNotFoundException;
 +import java.io.FileOutputStream;
 +import java.io.IOException;
 +
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +import java.util.Properties;
 +
 +import javax.xml.xpath.XPath;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathFactory;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.easymock.EasyMock;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.w3c.dom.NodeList;
 +import org.xml.sax.SAXException;
 +
++import static org.hamcrest.Matchers.hasXPath;
++import static org.hamcrest.Matchers.is;
 +import static org.junit.Assert.assertEquals;
 +import static org.junit.Assert.assertFalse;
 +import static org.junit.Assert.assertNotNull;
++import static org.junit.Assert.assertThat;
 +import static org.junit.Assert.assertTrue;
 +import static org.junit.Assert.fail;
 +
 +
 +public class SimpleDescriptorHandlerTest {
 +
 +    private static final String TEST_PROVIDER_CONFIG =
 +            "    <gateway>\n" +
 +                    "        <provider>\n" +
 +                    "            <role>authentication</role>\n" +
 +                    "            <name>ShiroProvider</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param>\n" +
 +                    "                <!-- \n" +
 +                    "                session timeout in minutes; this is really an idle timeout and\n" +
 +                    "                defaults to 30 minutes if the property value is not defined;\n" +
 +                    "                the current client authentication would expire if the client idles continuously for more than this value\n" +
 +                    "                -->\n" +
 +                    "                <name>sessionTimeout</name>\n" +
 +                    "                <value>30</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapRealm</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapContextFactory</name>\n" +
 +                    "                <value>org.apache.knox.gateway.shirorealm.KnoxLdapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory</name>\n" +
 +                    "                <value>$ldapContextFactory</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.userDnTemplate</name>\n" +
 +                    "                <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.url</name>\n" +
 +                    "                <value>ldap://localhost:33389</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n" +
 +                    "                <value>simple</value>\n" +
 +                    "            </param>\n" +
 +                    "            <param>\n" +
 +                    "                <name>urls./**</name>\n" +
 +                    "                <value>authcBasic</value>\n" +
 +                    "            </param>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <provider>\n" +
 +                    "            <role>identity-assertion</role>\n" +
 +                    "            <name>Default</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "        </provider>\n" +
 +                    "\n" +
 +                    "        <!--\n" +
 +                    "        Defines rules for mapping host names internal to a Hadoop cluster to externally accessible host names.\n" +
 +                    "        For example, a Hadoop service running in AWS may return a response that includes URLs containing\n" +
 +                    "        some AWS internal host name.  If the client needs to make a subsequent request to the host identified\n" +
 +                    "        in those URLs they need to be mapped to external host names that the client Knox can use to connect.\n" +
 +                    "\n" +
 +                    "        If the external host names and internal host names are the same, turn off this provider by setting the value of\n" +
 +                    "        the enabled parameter to false.\n" +
 +                    "\n" +
 +                    "        The name parameter specifies the external host names in a comma separated list.\n" +
 +                    "        The value parameter specifies corresponding internal host names in a comma separated list.\n" +
 +                    "\n" +
 +                    "        Note that when you are using Sandbox, the external hostname needs to be localhost, as seen in the\n" +
 +                    "        out-of-the-box sandbox.xml.  This is because Sandbox uses port mapping to allow clients to connect to the\n" +
 +                    "        Hadoop services using localhost.  In real clusters, external host names would almost never be localhost.\n" +
 +                    "        -->\n" +
 +                    "        <provider>\n" +
 +                    "            <role>hostmap</role>\n" +
 +                    "            <name>static</name>\n" +
 +                    "            <enabled>true</enabled>\n" +
 +                    "            <param><name>localhost</name><value>sandbox,sandbox.hortonworks.com</value></param>\n" +
 +                    "        </provider>\n" +
 +                    "    </gateway>\n";
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
 +     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testSimpleDescriptorHandler() throws Exception {
 +
 +        final String type = "PROPERTIES_FILE";
 +        final String clusterName = "dummy";
 +
 +        // Create a properties file to be the source of service discovery details for this test
 +        final File discoveryConfig = File.createTempFile(getClass().getName() + "_discovery-config", ".properties");
 +
 +        final String address = discoveryConfig.getAbsolutePath();
 +
 +        final Properties DISCOVERY_PROPERTIES = new Properties();
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".name", clusterName);
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".NAMENODE", "hdfs://namenodehost:8020");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".JOBTRACKER", "rpc://jobtrackerhostname:8050");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHDFS", "http://webhdfshost:1234");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHCAT", "http://webhcathost:50111/templeton");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".OOZIE", "http://ooziehost:11000/oozie");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".WEBHBASE", "http://webhbasehost:1234");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".HIVE", "http://hivehostname:10001/clipath");
 +        DISCOVERY_PROPERTIES.setProperty(clusterName + ".RESOURCEMANAGER", "http://remanhost:8088/ws");
 +
 +        try {
 +            DISCOVERY_PROPERTIES.store(new FileOutputStream(discoveryConfig), null);
 +        } catch (FileNotFoundException e) {
 +            fail(e.getMessage());
 +        }
 +
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +        serviceURLs.put("KNOXSSO", null);
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = new File(System.getProperty("java.io.tmpdir"), "ambari-cluster-policy.xml");
 +        FileUtils.write(providerConfig, TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = new File(System.getProperty("java.io.tmpdir")).getCanonicalFile();
 +
 +            Map<String, Map<String, String>> serviceParameters = new HashMap<>();
 +            Map<String, String> knoxssoParams = new HashMap<>();
 +            knoxssoParams.put("knoxsso.cookie.secure.only", "true");
 +            knoxssoParams.put("knoxsso.token.ttl", "100000");
 +            serviceParameters.put("KNOXSSO", knoxssoParams);
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(clusterName).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.expect(svc.getParams()).andReturn(serviceParameters.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                           SimpleDescriptorHandler.handle(testDescriptor,
 +                                                          providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                          destDir);
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
++            // KNOX-1105 Mark generated topology files
++            assertThat("Expected the \"generated\" marker element in the topology XML, with value of \"true\".",
++                       topologyXml,
++                       hasXPath("/topology/generated", is("true")));
++
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                       extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                        (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +
 +                // Validate the role
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +
 +                // Validate the URLs
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +
 +                    // If the service should have a URL (some don't require it)
 +                    if (serviceURLs.containsKey(role)) {
 +                        assertNotNull("Declared service should have a URL.", url);
 +                        if (!topologyServiceURLs.containsKey(role)) {
 +                            topologyServiceURLs.put(role, new ArrayList<>());
 +                        }
 +                        topologyServiceURLs.get(role).add(url); // Add it for validation later
 +                    }
 +                }
 +
 +                // If params were declared in the descriptor, then validate them in the resulting topology file
 +                Map<String, String> params = serviceParameters.get(role);
 +                if (params != null) {
 +                    NodeList paramNodes = (NodeList) xpath.compile("param").evaluate(serviceNode, XPathConstants.NODESET);
 +                    for (int paramNodeIndex = 0; paramNodeIndex < paramNodes.getLength(); paramNodeIndex++) {
 +                        Node paramNode = paramNodes.item(paramNodeIndex);
 +                        String paramName = (String) xpath.compile("name/text()").evaluate(paramNode, XPathConstants.STRING);
 +                        String paramValue = (String) xpath.compile("value/text()").evaluate(paramNode, XPathConstants.STRING);
 +                        assertTrue(params.keySet().contains(paramName));
 +                        assertEquals(params.get(paramName), paramValue);
 +                    }
 +                }
 +
 +            }
 +            assertEquals("Unexpected number of service declarations.", (serviceURLs.size() - 1), topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            providerConfig.delete();
 +            discoveryConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    /**
 +     * KNOX-1006
 +     *
 +     * Verify the behavior of the SimpleDescriptorHandler when service discovery fails to produce a valid URL for
 +     * a service.
 +     *
 +     * N.B. This test depends on the PropertiesFileServiceDiscovery extension being configured:
 +     *             org.apache.knox.gateway.topology.discovery.test.extension.PropertiesFileServiceDiscovery
 +     */
 +    @Test
 +    public void testInvalidServiceURLFromDiscovery() throws Exception {
 +        final String CLUSTER_NAME = "myproperties";
 +
 +        // Configure the PropertiesFile Service Discovery implementation for this test
 +        final String DEFAULT_VALID_SERVICE_URL = "http://localhost:9999/thiswillwork";
 +        Properties serviceDiscoverySourceProps = new Properties();
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".NAMENODE",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "hdfs"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".JOBTRACKER",
 +                                                DEFAULT_VALID_SERVICE_URL.replace("http", "rpc"));
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHDFS",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHCAT",         DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".OOZIE",           DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".WEBHBASE",        DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".HIVE",            "{SCHEME}://localhost:10000/");
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".RESOURCEMANAGER", DEFAULT_VALID_SERVICE_URL);
 +        serviceDiscoverySourceProps.setProperty(CLUSTER_NAME + ".AMBARIUI",        DEFAULT_VALID_SERVICE_URL);
 +        File serviceDiscoverySource = File.createTempFile("service-discovery", ".properties");
 +        serviceDiscoverySourceProps.store(new FileOutputStream(serviceDiscoverySource),
 +                                          "Test Service Discovery Source");
 +
 +        // Prepare a mock SimpleDescriptor
 +        final String type = "PROPERTIES_FILE";
 +        final String address = serviceDiscoverySource.getAbsolutePath();
 +        final Map<String, List<String>> serviceURLs = new HashMap<>();
 +        serviceURLs.put("NAMENODE", null);
 +        serviceURLs.put("JOBTRACKER", null);
 +        serviceURLs.put("WEBHDFS", null);
 +        serviceURLs.put("WEBHCAT", null);
 +        serviceURLs.put("OOZIE", null);
 +        serviceURLs.put("WEBHBASE", null);
 +        serviceURLs.put("HIVE", null);
 +        serviceURLs.put("RESOURCEMANAGER", null);
 +        serviceURLs.put("AMBARIUI", Collections.singletonList("http://c6401.ambari.apache.org:8080"));
 +
 +        // Write the externalized provider config to a temp file
 +        File providerConfig = writeProviderConfig("ambari-cluster-policy.xml", TEST_PROVIDER_CONFIG);
 +
 +        File topologyFile = null;
 +        try {
 +            File destDir = (new File(".")).getCanonicalFile();
 +
 +            // Mock out the simple descriptor
 +            SimpleDescriptor testDescriptor = EasyMock.createNiceMock(SimpleDescriptor.class);
 +            EasyMock.expect(testDescriptor.getName()).andReturn("mysimpledescriptor").anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryAddress()).andReturn(address).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryType()).andReturn(type).anyTimes();
 +            EasyMock.expect(testDescriptor.getDiscoveryUser()).andReturn(null).anyTimes();
 +            EasyMock.expect(testDescriptor.getProviderConfig()).andReturn(providerConfig.getAbsolutePath()).anyTimes();
 +            EasyMock.expect(testDescriptor.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
 +            List<SimpleDescriptor.Service> serviceMocks = new ArrayList<>();
 +            for (String serviceName : serviceURLs.keySet()) {
 +                SimpleDescriptor.Service svc = EasyMock.createNiceMock(SimpleDescriptor.Service.class);
 +                EasyMock.expect(svc.getName()).andReturn(serviceName).anyTimes();
 +                EasyMock.expect(svc.getURLs()).andReturn(serviceURLs.get(serviceName)).anyTimes();
 +                EasyMock.replay(svc);
 +                serviceMocks.add(svc);
 +            }
 +            EasyMock.expect(testDescriptor.getServices()).andReturn(serviceMocks).anyTimes();
 +            EasyMock.replay(testDescriptor);
 +
 +            // Invoke the simple descriptor handler
 +            Map<String, File> files =
 +                    SimpleDescriptorHandler.handle(testDescriptor,
 +                                                   providerConfig.getParentFile(), // simple desc co-located with provider config
 +                                                   destDir);
 +
 +            topologyFile = files.get("topology");
 +
 +            // Validate the resulting topology descriptor
 +            assertTrue(topologyFile.exists());
 +
 +            // Validate the topology descriptor's correctness
 +            TopologyValidator validator = new TopologyValidator( topologyFile.getAbsolutePath() );
 +            if( !validator.validateTopology() ){
 +                throw new SAXException( validator.getErrorString() );
 +            }
 +
 +            XPathFactory xPathfactory = XPathFactory.newInstance();
 +            XPath xpath = xPathfactory.newXPath();
 +
 +            // Parse the topology descriptor
 +            Document topologyXml = XmlUtils.readXml(topologyFile);
 +
 +            // Validate the provider configuration
 +            Document extProviderConf = XmlUtils.readXml(new ByteArrayInputStream(TEST_PROVIDER_CONFIG.getBytes()));
 +            Node gatewayNode = (Node) xpath.compile("/topology/gateway").evaluate(topologyXml, XPathConstants.NODE);
 +            assertTrue("Resulting provider config should be identical to the referenced content.",
 +                    extProviderConf.getDocumentElement().isEqualNode(gatewayNode));
 +
 +            // Validate the service declarations
 +            List<String> topologyServices = new ArrayList<>();
 +            Map<String, List<String>> topologyServiceURLs = new HashMap<>();
 +            NodeList serviceNodes =
 +                    (NodeList) xpath.compile("/topology/service").evaluate(topologyXml, XPathConstants.NODESET);
 +            for (int serviceNodeIndex=0; serviceNodeIndex < serviceNodes.getLength(); serviceNodeIndex++) {
 +                Node serviceNode = serviceNodes.item(serviceNodeIndex);
 +                Node roleNode = (Node) xpath.compile("role/text()").evaluate(serviceNode, XPathConstants.NODE);
 +                assertNotNull(roleNode);
 +                String role = roleNode.getNodeValue();
 +                topologyServices.add(role);
 +                NodeList urlNodes = (NodeList) xpath.compile("url/text()").evaluate(serviceNode, XPathConstants.NODESET);
 +                for(int urlNodeIndex = 0 ; urlNodeIndex < urlNodes.getLength(); urlNodeIndex++) {
 +                    Node urlNode = urlNodes.item(urlNodeIndex);
 +                    assertNotNull(urlNode);
 +                    String url = urlNode.getNodeValue();
 +                    assertNotNull("Every declared service should have a URL.", url);
 +                    if (!topologyServiceURLs.containsKey(role)) {
 +                        topologyServiceURLs.put(role, new ArrayList<>());
 +                    }
 +                    topologyServiceURLs.get(role).add(url);
 +                }
 +            }
 +
 +            // There should not be a service element for HIVE, since it had no valid URLs
 +            assertEquals("Unexpected number of service declarations.", serviceURLs.size() - 1, topologyServices.size());
 +            assertFalse("The HIVE service should have been omitted from the generated topology.", topologyServices.contains("HIVE"));
 +
 +            assertEquals("Unexpected number of service URLs.", serviceURLs.size() - 1, topologyServiceURLs.size());
 +
 +        } catch (Exception e) {
 +            e.printStackTrace();
 +            fail(e.getMessage());
 +        } finally {
 +            serviceDiscoverySource.delete();
 +            providerConfig.delete();
 +            if (topologyFile != null) {
 +                topologyFile.delete();
 +            }
 +        }
 +    }
 +
 +
 +    private File writeProviderConfig(String path, String content) throws IOException {
 +        File f = new File(path);
 +        FileUtils.write(f, content);
 +        return f;
 +    }
 +
 +}
index a0035fc,0000000..9ecd7fc
mode 100644,000000..100644
--- /dev/null
@@@ -1,657 -1,0 +1,674 @@@
-     GatewayServices gs = (GatewayServices) request.getServletContext()
-         .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin;
 +
 +import com.fasterxml.jackson.annotation.JsonProperty;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.FilenameUtils;
 +import org.apache.knox.gateway.i18n.GatewaySpiMessages;
 +import org.apache.knox.gateway.i18n.messages.MessagesFactory;
 +import org.apache.knox.gateway.service.admin.beans.BeanConverter;
 +import org.apache.knox.gateway.service.admin.beans.Topology;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.topology.TopologyService;
 +
 +import javax.servlet.http.HttpServletRequest;
 +import javax.ws.rs.Consumes;
 +import javax.ws.rs.DELETE;
 +import javax.ws.rs.GET;
 +import javax.ws.rs.PUT;
 +import javax.ws.rs.Path;
 +import javax.ws.rs.PathParam;
 +import javax.ws.rs.Produces;
 +import javax.ws.rs.core.Context;
 +import javax.ws.rs.core.Response;
 +import javax.xml.bind.annotation.XmlAccessType;
 +import javax.xml.bind.annotation.XmlAccessorType;
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
 +import java.io.File;
 +import java.io.IOException;
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.Collections;
 +import java.util.Comparator;
 +import java.util.List;
 +
 +import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
 +import static javax.ws.rs.core.MediaType.APPLICATION_XML;
 +import static javax.ws.rs.core.MediaType.TEXT_PLAIN;
 +
 +import static javax.ws.rs.core.Response.ok;
 +import static javax.ws.rs.core.Response.created;
 +import static javax.ws.rs.core.Response.notModified;
 +import static javax.ws.rs.core.Response.status;
 +
 +
 +@Path("/api/v1")
 +public class TopologiesResource {
 +
 +  private static final String XML_EXT  = ".xml";
 +  private static final String JSON_EXT = ".json";
 +
 +  private static final String TOPOLOGIES_API_PATH    = "topologies";
 +  private static final String SINGLE_TOPOLOGY_API_PATH = TOPOLOGIES_API_PATH + "/{id}";
 +  private static final String PROVIDERCONFIG_API_PATH = "providerconfig";
 +  private static final String SINGLE_PROVIDERCONFIG_API_PATH = PROVIDERCONFIG_API_PATH + "/{name}";
 +  private static final String DESCRIPTORS_API_PATH    = "descriptors";
 +  private static final String SINGLE_DESCRIPTOR_API_PATH = DESCRIPTORS_API_PATH + "/{name}";
 +
 +  private static GatewaySpiMessages log = MessagesFactory.get(GatewaySpiMessages.class);
 +
 +  @Context
 +  private HttpServletRequest request;
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology getTopology(@PathParam("id") String id) {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    GatewayConfig config = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      if(t.getName().equals(id)) {
 +        try {
 +          t.setUri(new URI( buildURI(t, config, request) ));
 +        } catch (URISyntaxException se) {
 +          t.setUri(null);
 +        }
 +        return BeanConverter.getTopology(t);
 +      }
 +    }
 +    return null;
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(TOPOLOGIES_API_PATH)
 +  public SimpleTopologyWrapper getTopologies() {
 +    GatewayServices services = (GatewayServices) request.getServletContext()
 +        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    ArrayList<SimpleTopology> st = new ArrayList<SimpleTopology>();
 +    GatewayConfig conf = (GatewayConfig) request.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
 +
 +    for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +      st.add(getSimpleTopology(t, conf));
 +    }
 +
 +    Collections.sort(st, new TopologyComparator());
 +    SimpleTopologyWrapper stw = new SimpleTopologyWrapper();
 +
 +    for(SimpleTopology t : st){
 +      stw.topologies.add(t);
 +    }
 +
 +    return stw;
 +
 +  }
 +
 +  @PUT
 +  @Consumes({APPLICATION_JSON, APPLICATION_XML})
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Topology uploadTopology(@PathParam("id") String id, Topology t) {
++    Topology result = null;
 +
-     ts.deployTopology(BeanConverter.getTopology(t));
++    GatewayServices gs =
++                (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    t.setName(id);
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
-     return getTopology(id);
++    // Check for existing topology with the same name, to see if it had been generated
++    boolean existingGenerated = false;
++    for (org.apache.knox.gateway.topology.Topology existingTopology : ts.getTopologies()) {
++      if(existingTopology.getName().equals(id)) {
++        existingGenerated = existingTopology.isGenerated();
++        break;
++      }
++    }
++
++    // If a topology with the same ID exists, which had been generated, then DO NOT overwrite it because it will be
++    // out of sync with the source descriptor. Otherwise, deploy the updated version.
++    if (!existingGenerated) {
++      ts.deployTopology(BeanConverter.getTopology(t));
++      result = getTopology(id);
++    } else {
++      log.disallowedOverwritingGeneratedTopology(id);
++    }
 +
++    return result;
 +  }
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_TOPOLOGY_API_PATH)
 +  public Response deleteTopology(@PathParam("id") String id) {
 +    boolean deleted = false;
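 +    // The "admin" topology is protected and cannot be deleted via this API.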
 +    if(!"admin".equals(id)) {
 +      GatewayServices services = (GatewayServices) request.getServletContext()
 +          .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +      for (org.apache.knox.gateway.topology.Topology t : ts.getTopologies()) {
 +        if(t.getName().equals(id)) {
 +          ts.deleteTopology(t);
 +          deleted = true;
 +        }
 +      }
 +    }else{
 +      deleted = false;
 +    }
 +    return ok().entity("{ \"deleted\" : " + deleted + " }").build();
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_JSON})
 +  @Path(PROVIDERCONFIG_API_PATH)
 +  public HrefListing getProviderConfigurations() {
 +    HrefListing listing = new HrefListing();
 +    listing.setHref(buildHref(request));
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    List<HrefListItem> configs = new ArrayList<>();
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    // Get all the provider configuration file names
 +    for (File providerConfig : ts.getProviderConfigurations()){
 +      String id = FilenameUtils.getBaseName(providerConfig.getName());
 +      configs.add(new HrefListItem(buildHref(id, request), providerConfig.getName()));
 +    }
 +
 +    listing.setItems(configs);
 +    return listing;
 +  }
 +
 +  @GET
 +  @Produces({APPLICATION_XML})
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response getProviderConfiguration(@PathParam("name") String name) {
 +    Response response;
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    File providerConfigFile = null;
 +
 +    for (File pc : ts.getProviderConfigurations()){
 +      // If the file name matches the specified id
 +      if (FilenameUtils.getBaseName(pc.getName()).equals(name)) {
 +        providerConfigFile = pc;
 +        break;
 +      }
 +    }
 +
 +    if (providerConfigFile != null) {
 +      byte[] content = null;
 +      try {
 +        content = FileUtils.readFileToByteArray(providerConfigFile);
 +        response = ok().entity(content).build();
 +      } catch (IOException e) {
 +        log.failedToReadConfigurationFile(providerConfigFile.getAbsolutePath(), e);
 +        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
 +      }
 +
 +    } else {
 +      response = Response.status(Response.Status.NOT_FOUND).build();
 +    }
 +    return response;
 +  }
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response deleteProviderConfiguration(@PathParam("name") String name) {
 +    Response response;
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    if (ts.deleteProviderConfiguration(name)) {
 +      response = ok().entity("{ \"deleted\" : \"provider config " + name + "\" }").build();
 +    } else {
 +      response = notModified().build();
 +    }
 +    return response;
 +  }
 +
 +
 +  @DELETE
 +  @Produces(APPLICATION_JSON)
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response deleteSimpleDescriptor(@PathParam("name") String name) {
 +    Response response = null;
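 +    // A descriptor named "admin" is protected and cannot be deleted via this API.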
 +    if(!"admin".equals(name)) {
 +      GatewayServices services =
 +              (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +      TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +      if (ts.deleteDescriptor(name)) {
 +        response = ok().entity("{ \"deleted\" : \"descriptor " + name + "\" }").build();
 +      }
 +    }
 +
 +    if (response == null) {
 +      response = notModified().build();
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  @PUT
 +  @Consumes({APPLICATION_XML})
 +  @Path(SINGLE_PROVIDERCONFIG_API_PATH)
 +  public Response uploadProviderConfiguration(@PathParam("name") String name, String content) {
 +    Response response = null;
 +
 +    GatewayServices gs =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    boolean isUpdate = configFileExists(ts.getProviderConfigurations(), name);
 +
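 +    // Ensure the persisted provider configuration file name carries the .xml extension.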
 +    String filename = name.endsWith(XML_EXT) ? name : name + XML_EXT;
 +    if (ts.deployProviderConfiguration(filename, content)) {
 +      try {
 +        if (isUpdate) {
 +          response = Response.noContent().build();
 +        } else{
 +          response = created(new URI(buildHref(request))).build();
 +        }
 +      } catch (URISyntaxException e) {
 +        log.invalidResourceURI(e.getInput(), e.getReason(), e);
 +        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy provider configuration " + name + "\" }").build();
 +      }
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  private boolean configFileExists(Collection<File> existing, String candidateName) {
 +    boolean result = false;
 +    for (File exists : existing) {
 +      if (FilenameUtils.getBaseName(exists.getName()).equals(candidateName)) {
 +        result = true;
 +        break;
 +      }
 +    }
 +    return result;
 +  }
 +
 +
 +  @PUT
 +  @Consumes({APPLICATION_JSON})
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response uploadSimpleDescriptor(@PathParam("name") String name, String content) {
 +    Response response = null;
 +
 +    GatewayServices gs =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = gs.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    boolean isUpdate = configFileExists(ts.getDescriptors(), name);
 +
 +    String filename = name.endsWith(JSON_EXT) ? name : name + JSON_EXT;
 +    if (ts.deployDescriptor(filename, content)) {
 +      try {
 +        if (isUpdate) {
 +          response = Response.noContent().build();
 +        } else {
 +          response = created(new URI(buildHref(request))).build();
 +        }
 +      } catch (URISyntaxException e) {
 +        log.invalidResourceURI(e.getInput(), e.getReason(), e);
 +        response = status(Response.Status.BAD_REQUEST).entity("{ \"error\" : \"Failed to deploy descriptor " + name + "\" }").build();
 +      }
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  @GET
 +  @Produces({APPLICATION_JSON})
 +  @Path(DESCRIPTORS_API_PATH)
 +  public HrefListing getSimpleDescriptors() {
 +    HrefListing listing = new HrefListing();
 +    listing.setHref(buildHref(request));
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    List<HrefListItem> descriptors = new ArrayList<>();
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +    for (File descriptor : ts.getDescriptors()){
 +      String id = FilenameUtils.getBaseName(descriptor.getName());
 +      descriptors.add(new HrefListItem(buildHref(id, request), descriptor.getName()));
 +    }
 +
 +    listing.setItems(descriptors);
 +    return listing;
 +  }
 +
 +
 +  @GET
 +  @Produces({APPLICATION_JSON, TEXT_PLAIN})
 +  @Path(SINGLE_DESCRIPTOR_API_PATH)
 +  public Response getSimpleDescriptor(@PathParam("name") String name) {
 +    Response response;
 +
 +    GatewayServices services =
 +            (GatewayServices) request.getServletContext().getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +
 +    TopologyService ts = services.getService(GatewayServices.TOPOLOGY_SERVICE);
 +
 +    File descriptorFile = null;
 +
 +    for (File sd : ts.getDescriptors()){
 +      // If the file name matches the specified id
 +      if (FilenameUtils.getBaseName(sd.getName()).equals(name)) {
 +        descriptorFile = sd;
 +        break;
 +      }
 +    }
 +
 +    if (descriptorFile != null) {
 +      String mediaType = APPLICATION_JSON;
 +
 +      byte[] content = null;
 +      try {
 +        if ("yml".equals(FilenameUtils.getExtension(descriptorFile.getName()))) {
 +          mediaType = TEXT_PLAIN;
 +        }
 +        content = FileUtils.readFileToByteArray(descriptorFile);
 +        response = ok().type(mediaType).entity(content).build();
 +      } catch (IOException e) {
 +        log.failedToReadConfigurationFile(descriptorFile.getAbsolutePath(), e);
 +        response = Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
 +      }
 +    } else {
 +      response = Response.status(Response.Status.NOT_FOUND).build();
 +    }
 +
 +    return response;
 +  }
 +
 +
 +  private static class TopologyComparator implements Comparator<SimpleTopology> {
 +    @Override
 +    public int compare(SimpleTopology t1, SimpleTopology t2) {
 +      return t1.getName().compareTo(t2.getName());
 +    }
 +  }
 +
 +
 +  String buildURI(org.apache.knox.gateway.topology.Topology topology, GatewayConfig config, HttpServletRequest req){
 +    String uri = buildXForwardBaseURL(req);
 +
 +    // Strip extra context
 +    uri = uri.replace(req.getContextPath(), "");
 +
 +    // Add the gateway path
 +    String gatewayPath;
 +    if(config.getGatewayPath() != null){
 +      gatewayPath = config.getGatewayPath();
 +    }else{
 +      gatewayPath = "gateway";
 +    }
 +    uri += "/" + gatewayPath;
 +
 +    uri += "/" + topology.getName();
 +    return uri;
 +  }
 +
 +  String buildHref(HttpServletRequest req) {
 +    return buildHref((String)null, req);
 +  }
 +
 +  String buildHref(String id, HttpServletRequest req) {
 +    String href = buildXForwardBaseURL(req);
 +    // Make sure that the pathInfo doesn't have any '/' chars at the end.
 +    String pathInfo = req.getPathInfo();
 +    while(pathInfo.endsWith("/")) {
 +      pathInfo = pathInfo.substring(0, pathInfo.length() - 1);
 +    }
 +
 +    href += pathInfo;
 +
 +    if (id != null) {
 +      href += "/" + id;
 +    }
 +
 +    return href;
 +  }
 +
 +   String buildHref(org.apache.knox.gateway.topology.Topology t, HttpServletRequest req) {
 +     return buildHref(t.getName(), req);
 +  }
 +
 +  private SimpleTopology getSimpleTopology(org.apache.knox.gateway.topology.Topology t, GatewayConfig config) {
 +    String uri = buildURI(t, config, request);
 +    String href = buildHref(t, request);
 +    return new SimpleTopology(t, uri, href);
 +  }
 +
 +  private String buildXForwardBaseURL(HttpServletRequest req){
 +    final String X_Forwarded = "X-Forwarded-";
 +    final String X_Forwarded_Context = X_Forwarded + "Context";
 +    final String X_Forwarded_Proto = X_Forwarded + "Proto";
 +    final String X_Forwarded_Host = X_Forwarded + "Host";
 +    final String X_Forwarded_Port = X_Forwarded + "Port";
 +    final String X_Forwarded_Server = X_Forwarded + "Server";
 +
 +    String baseURL = "";
 +
 +    // Get Protocol
 +    if(req.getHeader(X_Forwarded_Proto) != null){
 +      baseURL += req.getHeader(X_Forwarded_Proto) + "://";
 +    } else {
 +      baseURL += req.getProtocol() + "://";
 +    }
 +
 +    // Handle Server/Host and Port Here
 +    if (req.getHeader(X_Forwarded_Host) != null && req.getHeader(X_Forwarded_Port) != null){
 +      // Double check to see if host has port
 +      if(req.getHeader(X_Forwarded_Host).contains(req.getHeader(X_Forwarded_Port))){
 +        baseURL += req.getHeader(X_Forwarded_Host);
 +      } else {
 +        // The host header does not include the port, so append it
 +        baseURL += req.getHeader(X_Forwarded_Host) + ":" + req.getHeader(X_Forwarded_Port);
 +      }
 +    } else if(req.getHeader(X_Forwarded_Server) != null && req.getHeader(X_Forwarded_Port) != null){
 +      // No usable host header; fall back to the X-Forwarded-Server and X-Forwarded-Port headers
 +      baseURL += req.getHeader(X_Forwarded_Server) + ":" + req.getHeader(X_Forwarded_Port);
 +    } else if(req.getHeader(X_Forwarded_Port) != null) {
 +      // if we at least have a port, we can use it.
 +      baseURL += req.getServerName() + ":" + req.getHeader(X_Forwarded_Port);
 +    } else {
 +      // Resort to request members
 +      baseURL += req.getServerName() + ":" + req.getLocalPort();
 +    }
 +
 +    // Handle Server context
 +    if( req.getHeader(X_Forwarded_Context) != null ) {
 +      baseURL += req.getHeader( X_Forwarded_Context );
 +    } else {
 +      baseURL += req.getContextPath();
 +    }
 +
 +    return baseURL;
 +  }
 +
 +
 +  static class HrefListing {
 +    @JsonProperty
 +    String href;
 +
 +    @JsonProperty
 +    List<HrefListItem> items;
 +
 +    HrefListing() {}
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setItems(List<HrefListItem> items) {
 +      this.items = items;
 +    }
 +
 +    public List<HrefListItem> getItems() {
 +      return items;
 +    }
 +  }
 +
 +  static class HrefListItem {
 +    @JsonProperty
 +    String href;
 +
 +    @JsonProperty
 +    String name;
 +
 +    HrefListItem() {}
 +
 +    HrefListItem(String href, String name) {
 +      this.href = href;
 +      this.name = name;
 +    }
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setName(String name) {
 +      this.name = name;
 +    }
 +    public String getName() {
 +      return name;
 +    }
 +  }
 +
 +
 +  @XmlAccessorType(XmlAccessType.NONE)
 +  public static class SimpleTopology {
 +
 +    @XmlElement
 +    private String name;
 +    @XmlElement
 +    private String timestamp;
 +    @XmlElement
 +    private String defaultServicePath;
 +    @XmlElement
 +    private String uri;
 +    @XmlElement
 +    private String href;
 +
 +    public SimpleTopology() {}
 +
 +    public SimpleTopology(org.apache.knox.gateway.topology.Topology t, String uri, String href) {
 +      this.name = t.getName();
 +      this.timestamp = Long.toString(t.getTimestamp());
 +      this.defaultServicePath = t.getDefaultServicePath();
 +      this.uri = uri;
 +      this.href = href;
 +    }
 +
 +    public String getName() {
 +      return name;
 +    }
 +
 +    public void setName(String n) {
 +      name = n;
 +    }
 +
 +    public String getTimestamp() {
 +      return timestamp;
 +    }
 +
 +    public void setDefaultService(String defaultServicePath) {
 +      this.defaultServicePath = defaultServicePath;
 +    }
 +
 +    public String getDefaultService() {
 +      return defaultServicePath;
 +    }
 +
 +    public void setTimestamp(String timestamp) {
 +      this.timestamp = timestamp;
 +    }
 +
 +    public String getUri() {
 +      return uri;
 +    }
 +
 +    public void setUri(String uri) {
 +      this.uri = uri;
 +    }
 +
 +    public String getHref() {
 +      return href;
 +    }
 +
 +    public void setHref(String href) {
 +      this.href = href;
 +    }
 +  }
 +
 +  @XmlAccessorType(XmlAccessType.FIELD)
 +  public static class SimpleTopologyWrapper{
 +
 +    @XmlElement(name="topology")
 +    @XmlElementWrapper(name="topologies")
 +    private List<SimpleTopology> topologies = new ArrayList<SimpleTopology>();
 +
 +    public List<SimpleTopology> getTopologies(){
 +      return topologies;
 +    }
 +
 +    public void setTopologies(List<SimpleTopology> ts){
 +      this.topologies = ts;
 +    }
 +
 +  }
 +}
 +
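For reference, a minimal client-side sketch of how the PUT operation above could be exercised once deployed. The admin API URL, the providerconfig path segment, and the credentials are illustrative assumptions (the real path constants are defined elsewhere in TopologiesResource), and trust setup for the gateway's TLS certificate is omitted.

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class ProviderConfigUploadSketch {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; PUT to the single-provider-config path creates or updates a shared provider configuration.
    String endpoint = "https://localhost:8443/gateway/admin/api/v1/providerconfig/sandbox-providers";
    String auth = Base64.getEncoder()
        .encodeToString("admin:admin-password".getBytes(StandardCharsets.UTF_8));

    HttpURLConnection conn = (HttpURLConnection) new URL(endpoint).openConnection();
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Authorization", "Basic " + auth);
    conn.setRequestProperty("Content-Type", "application/xml"); // matches @Consumes({APPLICATION_XML})
    conn.setDoOutput(true);

    // Illustrative shared provider configuration body.
    String body = "<gateway><provider><role>authentication</role>"
                + "<name>ShiroProvider</name><enabled>true</enabled></provider></gateway>";
    try (OutputStream out = conn.getOutputStream()) {
      out.write(body.getBytes(StandardCharsets.UTF_8));
    }

    // Expect 201 (Created) for a new configuration or 204 (No Content) for an update,
    // per uploadProviderConfiguration above.
    System.out.println("HTTP " + conn.getResponseCode());
  }
}

A DELETE against the same URL maps to deleteProviderConfiguration and returns the small JSON acknowledgement shown above, or 304 Not Modified when nothing was removed.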
index e8d6915,0000000..e916568
mode 100644,000000..100644
--- /dev/null
@@@ -1,170 -1,0 +1,172 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import org.apache.knox.gateway.topology.Version;
 +
 +import java.util.Collection;
 +
 +public class BeanConverter {
 +
 +  public static Topology getTopology(
 +      org.apache.knox.gateway.topology.Topology topology) {
 +    Topology topologyResource = new Topology();
 +    topologyResource.setName(topology.getName());
 +    topologyResource.setTimestamp(topology.getTimestamp());
 +    topologyResource.setPath(topology.getDefaultServicePath());
 +    topologyResource.setUri(topology.getUri());
++    topologyResource.setGenerated(topology.isGenerated());
 +    for ( org.apache.knox.gateway.topology.Provider provider : topology.getProviders() ) {
 +      topologyResource.getProviders().add( getProvider(provider) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Service service : topology.getServices() ) {
 +      topologyResource.getServices().add( getService(service) );
 +    }
 +    for ( org.apache.knox.gateway.topology.Application application : topology.getApplications() ) {
 +      topologyResource.getApplications().add( getApplication(application) );
 +    }
 +    return topologyResource;
 +  }
 +
 +  public static org.apache.knox.gateway.topology.Topology getTopology(Topology topology) {
 +    org.apache.knox.gateway.topology.Topology deploymentTopology = new org.apache.knox.gateway.topology.Topology();
 +    deploymentTopology.setName(topology.getName());
 +    deploymentTopology.setTimestamp(topology.getTimestamp());
 +    deploymentTopology.setDefaultServicePath(topology.getPath());
 +    deploymentTopology.setUri(topology.getUri());
++    deploymentTopology.setGenerated(topology.isGenerated());
 +    for ( Provider provider : topology.getProviders() ) {
 +      deploymentTopology.addProvider( getProvider(provider) );
 +    }
 +    for ( Service service : topology.getServices() ) {
 +      deploymentTopology.addService( getService(service) );
 +    }
 +    for ( Application application : topology.getApplications() ) {
 +      deploymentTopology.addApplication( getApplication(application) );
 +    }
 +    return deploymentTopology;
 +  }
 +
 +  private static Provider getProvider(
 +      org.apache.knox.gateway.topology.Provider provider) {
 +    Provider providerResource = new Provider();
 +    providerResource.setName(provider.getName());
 +    providerResource.setEnabled(provider.isEnabled());
 +    providerResource.setRole(provider.getRole());
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = provider.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        providerResource.getParams().add(getParam(param));
 +      }
 +    }
 +    return providerResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Provider getProvider(Provider provider) {
 +    org.apache.knox.gateway.topology.Provider deploymentProvider = new org.apache.knox.gateway.topology.Provider();
 +    deploymentProvider.setName(provider.getName());
 +    deploymentProvider.setEnabled(provider.isEnabled());
 +    deploymentProvider.setRole(provider.getRole());
 +    for ( Param param : provider.getParams() ) {
 +      deploymentProvider.addParam( getParam(param) );
 +    }
 +    return deploymentProvider;
 +  }
 +
 +  private static Service getService(
 +      org.apache.knox.gateway.topology.Service service) {
 +    Service serviceResource = new Service();
 +    serviceResource.setRole(service.getRole());
 +    serviceResource.setName(service.getName());
 +    Version version = service.getVersion();
 +    if (version != null) {
 +      serviceResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = service.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        serviceResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : service.getUrls() ) {
 +      serviceResource.getUrls().add( url );
 +    }
 +    return serviceResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Service getService(Service service) {
 +    org.apache.knox.gateway.topology.Service deploymentService = new org.apache.knox.gateway.topology.Service();
 +    deploymentService.setRole(service.getRole());
 +    deploymentService.setName(service.getName());
 +    if (service.getVersion() != null) {
 +      deploymentService.setVersion(new Version(service.getVersion()));
 +    }
 +    for ( Param param : service.getParams() ) {
 +      deploymentService.addParam( getParam(param) );
 +    }
 +    for ( String url : service.getUrls() ) {
 +      deploymentService.addUrl( url );
 +    }
 +    return deploymentService;
 +  }
 +
 +  private static Application getApplication(
 +      org.apache.knox.gateway.topology.Application application) {
 +    Application applicationResource = new Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    Version version = application.getVersion();
 +    if (version != null) {
 +      applicationResource.setVersion(version.toString());
 +    }
 +    Collection<org.apache.knox.gateway.topology.Param> paramsList = application.getParamsList();
 +    if (paramsList != null && !paramsList.isEmpty()) {
 +      for ( org.apache.knox.gateway.topology.Param param : paramsList ) {
 +        applicationResource.getParams().add(getParam(param));
 +      }
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Application getApplication(Application application) {
 +    org.apache.knox.gateway.topology.Application applicationResource = new org.apache.knox.gateway.topology.Application();
 +    applicationResource.setRole(application.getRole());
 +    applicationResource.setName(application.getName());
 +    if (application.getVersion() != null) {
 +      applicationResource.setVersion(new Version(application.getVersion()));
 +    }
 +    for ( Param param : application.getParams() ) {
 +      applicationResource.addParam( getParam(param) );
 +    }
 +    for ( String url : application.getUrls() ) {
 +      applicationResource.getUrls().add( url );
 +    }
 +    return applicationResource;
 +  }
 +
 +  private static Param getParam(org.apache.knox.gateway.topology.Param param) {
 +    return new Param(param.getName(), param.getValue());
 +  }
 +
 +  private static org.apache.knox.gateway.topology.Param getParam(Param param) {
 +    return new org.apache.knox.gateway.topology.Param(param.getName(), param.getValue());
 +  }
 +}
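As a quick illustration of the two getTopology overloads above, a round-trip sketch showing that the new generated flag is carried in both directions; it uses only the classes touched by this change.

import org.apache.knox.gateway.service.admin.beans.BeanConverter;
import org.apache.knox.gateway.service.admin.beans.Topology;

public class BeanConverterRoundTripSketch {
  public static void main(String[] args) {
    // Internal topology model, as produced by the topology service.
    org.apache.knox.gateway.topology.Topology internal = new org.apache.knox.gateway.topology.Topology();
    internal.setName("sandbox");
    internal.setTimestamp(System.currentTimeMillis());
    internal.setGenerated(true); // new flag added by this change

    // Convert to the admin API bean and back again.
    Topology bean = BeanConverter.getTopology(internal);
    org.apache.knox.gateway.topology.Topology back = BeanConverter.getTopology(bean);

    // Both directions copy the flag, so this prints "true true".
    System.out.println(bean.isGenerated() + " " + back.isGenerated());
  }
}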
index 2d2eab8,0000000..e1a8279
mode 100644,000000..100644
--- /dev/null
@@@ -1,119 -1,0 +1,130 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.admin.beans;
 +
 +import javax.xml.bind.annotation.XmlElement;
 +import javax.xml.bind.annotation.XmlElementWrapper;
 +import javax.xml.bind.annotation.XmlRootElement;
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.List;
 +
 +@XmlRootElement(name="topology")
 +public class Topology {
 +
 +  @XmlElement
 +  private URI uri;
 +
 +  @XmlElement
 +  private String name;
 +
 +  @XmlElement
 +  private String path;
 +
 +  @XmlElement
 +  private long timestamp;
 +
++  @XmlElement(name="generated")
++  private boolean isGenerated;
++
 +  @XmlElement(name="provider")
 +  @XmlElementWrapper(name="gateway")
 +  public List<Provider> providers;
 +
 +  @XmlElement(name="service")
 +  public List<Service> services;
 +
 +  @XmlElement(name="application")
 +  private List<Application> applications;
 +
 +  public Topology() {
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
 +  public void setPath( String defaultServicePath ) {
 +    this.path = defaultServicePath;
 +  }
 +
 +  public String getPath() {
 +    return path;
 +  }
 +
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
++  public boolean isGenerated() {
++    return isGenerated;
++  }
++
++  public void setGenerated(boolean isGenerated) {
++    this.isGenerated = isGenerated;
++  }
++
 +  public List<Service> getServices() {
 +    if (services == null) {
 +      services = new ArrayList<>();
 +    }
 +    return services;
 +  }
 +
 +  public List<Application> getApplications() {
 +    if (applications == null) {
 +      applications = new ArrayList<>();
 +    }
 +    return applications;
 +  }
 +
 +  public List<Provider> getProviders() {
 +    if (providers == null) {
 +      providers = new ArrayList<>();
 +    }
 +    return providers;
 +  }
 +
 +  public void setProviders(List<Provider> providers) {
 +    this.providers = providers;
 +  }
 +
 +  public void setServices(List<Service> services) {
 +    this.services = services;
 +  }
 +
 +  public void setApplications(List<Application> applications) {
 +    this.applications = applications;
 +  }
 +}
index 3fe81e8,0000000..ac82b39
mode 100644,000000..100644
--- /dev/null
@@@ -1,80 -1,0 +1,91 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.service.definition;
 +
 +import javax.xml.bind.annotation.XmlAttribute;
 +import javax.xml.bind.annotation.XmlType;
 +
 +@XmlType(name = "dispatch")
 +public class CustomDispatch {
 +
 +  private String contributorName;
 +
 +  private String haContributorName;
 +
 +  private String className;
 +
 +  private String haClassName;
 +
 +  private String httpClientFactory;
 +
++  private boolean useTwoWaySsl = false;
++
 +  @XmlAttribute(name = "contributor-name")
 +  public String getContributorName() {
 +    return contributorName;
 +  }
 +
 +  public void setContributorName(String contributorName) {
 +    this.contributorName = contributorName;
 +  }
 +
 +  @XmlAttribute(name = "ha-contributor-name")
 +  public String getHaContributorName() {
 +    return haContributorName;
 +  }
 +
 +  public void setHaContributorName(String haContributorName) {
 +    this.haContributorName = haContributorName;
 +  }
 +
 +  @XmlAttribute(name = "classname")
 +  public String getClassName() {
 +    return className;
 +  }
 +
 +  public void setClassName(String className) {
 +    this.className = className;
 +  }
 +
 +  @XmlAttribute(name = "ha-classname")
 +  public String getHaClassName() {
 +    return haClassName;
 +  }
 +
 +  public void setHaClassName(String haContributorClassName) {
 +    this.haClassName = haContributorClassName;
 +  }
 +
 +  @XmlAttribute(name = "http-client-factory")
 +  public String getHttpClientFactory() {
 +    return httpClientFactory;
 +  }
 +
 +  public void setHttpClientFactory(String httpClientFactory) {
 +    this.httpClientFactory = httpClientFactory;
 +  }
++
++  @XmlAttribute(name = "use-two-way-ssl")
++  public boolean getUseTwoWaySsl() {
++    return useTwoWaySsl;
++  }
++
++  public void setUseTwoWaySsl(boolean useTwoWaySsl) {
++    this.useTwoWaySsl = useTwoWaySsl;
++  }
 +}
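A small sketch of how the new use-two-way-ssl attribute is picked up when a dispatch element is unmarshalled; the XML fragment and the standalone JAXB plumbing are illustrative assumptions, only the attribute names come from the class above.

import java.io.StringReader;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBElement;
import javax.xml.bind.Unmarshaller;
import javax.xml.transform.stream.StreamSource;

import org.apache.knox.gateway.service.definition.CustomDispatch;

public class DispatchSslFlagSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical service-definition fragment; attribute names match the @XmlAttribute declarations above.
    String xml = "<dispatch classname=\"org.apache.knox.gateway.dispatch.DefaultDispatch\" use-two-way-ssl=\"true\"/>";

    Unmarshaller u = JAXBContext.newInstance(CustomDispatch.class).createUnmarshaller();
    JAXBElement<CustomDispatch> element =
        u.unmarshal(new StreamSource(new StringReader(xml)), CustomDispatch.class);
    CustomDispatch dispatch = element.getValue();

    // Prints "true".
    System.out.println(dispatch.getUseTwoWaySsl());
  }
}

Because the field is initialized to false, omitting the attribute leaves mutual SSL disabled, so existing service definitions are unaffected.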
Simple merge
Simple merge
index e822364,0000000..dcb7465
mode 100644,000000..100644
--- /dev/null
@@@ -1,233 -1,0 +1,270 @@@
- import javax.servlet.FilterConfig;
- import java.io.IOException;
- import java.security.Principal;
- import java.util.Collections;
- import java.util.Date;
- import java.util.List;
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.dispatch;
 +
++import java.io.IOException;
++import java.security.KeyStore;
++import java.security.Principal;
++import java.util.Collections;
++import java.util.Date;
++import java.util.List;
++
++import javax.net.ssl.SSLContext;
++import javax.servlet.FilterConfig;
++
++import org.apache.knox.gateway.services.security.AliasService;
++import org.apache.knox.gateway.services.security.AliasServiceException;
++import org.apache.knox.gateway.services.security.KeystoreService;
++import org.apache.knox.gateway.services.security.MasterService;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.services.GatewayServices;
 +import org.apache.knox.gateway.services.metrics.MetricsService;
 +import org.apache.http.HttpRequest;
 +import org.apache.http.HttpResponse;
 +import org.apache.http.ProtocolException;
 +import org.apache.http.auth.AuthSchemeProvider;
 +import org.apache.http.auth.AuthScope;
 +import org.apache.http.auth.Credentials;
 +import org.apache.http.client.CookieStore;
 +import org.apache.http.client.CredentialsProvider;
 +import org.apache.http.client.HttpClient;
 +import org.apache.http.client.HttpRequestRetryHandler;
 +import org.apache.http.client.RedirectStrategy;
 +import org.apache.http.client.config.AuthSchemes;
 +import org.apache.http.client.config.RequestConfig;
 +import org.apache.http.client.methods.HttpUriRequest;
 +import org.apache.http.config.Registry;
 +import org.apache.http.config.RegistryBuilder;
++import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
++import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
 +import org.apache.http.cookie.Cookie;
 +import org.apache.http.impl.DefaultConnectionReuseStrategy;
 +import org.apache.http.impl.client.BasicCredentialsProvider;
 +import org.apache.http.impl.client.DefaultConnectionKeepAliveStrategy;
 +import org.apache.http.impl.client.HttpClientBuilder;
 +import org.apache.http.impl.client.HttpClients;
 +import org.apache.http.protocol.HttpContext;
++import org.apache.http.ssl.SSLContexts;
 +import org.joda.time.Period;
 +import org.joda.time.format.PeriodFormatter;
 +import org.joda.time.format.PeriodFormatterBuilder;
 +
-       GatewayServices services = (GatewayServices) filterConfig.getServletContext()
-           .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +public class DefaultHttpClientFactory implements HttpClientFactory {
 +
 +  @Override
 +  public HttpClient createHttpClient(FilterConfig filterConfig) {
 +    HttpClientBuilder builder = null;
 +    GatewayConfig gatewayConfig = (GatewayConfig) filterConfig.getServletContext().getAttribute(GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE);
++    GatewayServices services = (GatewayServices) filterConfig.getServletContext()
++        .getAttribute(GatewayServices.GATEWAY_SERVICES_ATTRIBUTE);
 +    if (gatewayConfig != null && gatewayConfig.isMetricsEnabled()) {
 +      MetricsService metricsService = services.getService(GatewayServices.METRICS_SERVICE);
 +      builder = metricsService.getInstrumented(HttpClientBuilder.class);
 +    } else {
 +      builder = HttpClients.custom();
 +    }
++    if (Boolean.parseBoolean(filterConfig.getInitParameter("useTwoWaySsl"))) {
++      char[] keypass = null;
++      MasterService ms = services.getService("MasterService");
++      AliasService as = services.getService(GatewayServices.ALIAS_SERVICE);
++      try {
++        keypass = as.getGatewayIdentityPassphrase();
++      } catch (AliasServiceException e) {
++        // nop - default passphrase will be used
++      }
++      if (keypass == null) {
++        // there has been no alias created for the key - let's assume it is the same as the keystore password
++        keypass = ms.getMasterSecret();
++      }
++
++      KeystoreService ks = services.getService(GatewayServices.KEYSTORE_SERVICE);
++      final SSLContext sslcontext;
++      try {
++        KeyStore keystoreForGateway = ks.getKeystoreForGateway();
++        sslcontext = SSLContexts.custom()
++            .loadTrustMaterial(keystoreForGateway, new TrustSelfSignedStrategy())
++            .loadKeyMaterial(keystoreForGateway, keypass)
++            .build();
++      } catch (Exception e) {
++        throw new IllegalArgumentException("Unable to create SSLContext", e);
++      }
++      builder.setSSLSocketFactory(new SSLConnectionSocketFactory(sslcontext));
++    }
 +    if ( "true".equals(System.getProperty(GatewayConfig.HADOOP_KERBEROS_SECURED)) ) {
 +      CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
 +      credentialsProvider.setCredentials(AuthScope.ANY, new UseJaasCredentials());
 +
 +      Registry<AuthSchemeProvider> authSchemeRegistry = RegistryBuilder.<AuthSchemeProvider>create()
 +          .register(AuthSchemes.SPNEGO, new KnoxSpnegoAuthSchemeFactory(true))
 +          .build();
 +
 +      builder = builder.setDefaultAuthSchemeRegistry(authSchemeRegistry)
 +          .setDefaultCookieStore(new HadoopAuthCookieStore())
 +          .setDefaultCredentialsProvider(credentialsProvider);
 +    } else {
 +      builder = builder.setDefaultCookieStore(new NoCookieStore());
 +    }
 +
 +    builder.setKeepAliveStrategy( DefaultConnectionKeepAliveStrategy.INSTANCE );
 +    builder.setConnectionReuseStrategy( DefaultConnectionReuseStrategy.INSTANCE );
 +    builder.setRedirectStrategy( new NeverRedirectStrategy() );
 +    builder.setRetryHandler( new NeverRetryHandler() );
 +
 +    int maxConnections = getMaxConnections( filterConfig );
 +    builder.setMaxConnTotal( maxConnections );
 +    builder.setMaxConnPerRoute( maxConnections );
 +
 +    builder.setDefaultRequestConfig( getRequestConfig( filterConfig ) );
 +
 +    HttpClient client = builder.build();
 +    return client;
 +  }
 +
 +  private static RequestConfig getRequestConfig( FilterConfig config ) {
 +    RequestConfig.Builder builder = RequestConfig.custom();
 +    int connectionTimeout = getConnectionTimeout( config );
 +    if ( connectionTimeout != -1 ) {
 +      builder.setConnectTimeout( connectionTimeout );
 +      builder.setConnectionRequestTimeout( connectionTimeout );
 +    }
 +    int socketTimeout = getSocketTimeout( config );
 +    if( socketTimeout != -1 ) {
 +      builder.setSocketTimeout( socketTimeout );
 +    }
 +    return builder.build();
 +  }
 +
 +  private static class NoCookieStore implements CookieStore {
 +    @Override
 +    public void addCookie(Cookie cookie) {
 +      //no op
 +    }
 +
 +    @Override
 +    public List<Cookie> getCookies() {
 +      return Collections.emptyList();
 +    }
 +
 +    @Override
 +    public boolean clearExpired(Date date) {
 +      return true;
 +    }
 +
 +    @Override
 +    public void clear() {
 +      //no op
 +    }
 +  }
 +
 +  private static class NeverRedirectStrategy implements RedirectStrategy {
 +    @Override
 +    public boolean isRedirected( HttpRequest request, HttpResponse response, HttpContext context )
 +        throws ProtocolException {
 +      return false;
 +    }
 +
 +    @Override
 +    public HttpUriRequest getRedirect( HttpRequest request, HttpResponse response, HttpContext context )
 +        throws ProtocolException {
 +      return null;
 +    }
 +  }
 +
 +  private static class NeverRetryHandler implements HttpRequestRetryHandler {
 +    @Override
 +    public boolean retryRequest( IOException exception, int executionCount, HttpContext context ) {
 +      return false;
 +    }
 +  }
 +
 +  private static class UseJaasCredentials implements Credentials {
 +
 +    public String getPassword() {
 +      return null;
 +    }
 +
 +    public Principal getUserPrincipal() {
 +      return null;
 +    }
 +
 +  }
 +
 +  private int getMaxConnections( FilterConfig filterConfig ) {
 +    int maxConnections = 32;
 +    GatewayConfig config =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( config != null ) {
 +      maxConnections = config.getHttpClientMaxConnections();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.maxConnections" );
 +    if( str != null ) {
 +      try {
 +        maxConnections = Integer.parseInt( str );
 +      } catch ( NumberFormatException e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return maxConnections;
 +  }
 +
 +  private static int getConnectionTimeout( FilterConfig filterConfig ) {
 +    int timeout = -1;
 +    GatewayConfig globalConfig =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( globalConfig != null ) {
 +      timeout = globalConfig.getHttpClientConnectionTimeout();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.connectionTimeout" );
 +    if( str != null ) {
 +      try {
 +        timeout = (int)parseTimeout( str );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return timeout;
 +  }
 +
 +  private static int getSocketTimeout( FilterConfig filterConfig ) {
 +    int timeout = -1;
 +    GatewayConfig globalConfig =
 +        (GatewayConfig)filterConfig.getServletContext().getAttribute( GatewayConfig.GATEWAY_CONFIG_ATTRIBUTE );
 +    if( globalConfig != null ) {
 +      timeout = globalConfig.getHttpClientSocketTimeout();
 +    }
 +    String str = filterConfig.getInitParameter( "httpclient.socketTimeout" );
 +    if( str != null ) {
 +      try {
 +        timeout = (int)parseTimeout( str );
 +      } catch ( Exception e ) {
 +        // Ignore it and use the default.
 +      }
 +    }
 +    return timeout;
 +  }
 +
 +  private static long parseTimeout( String s ) {
 +    PeriodFormatter f = new PeriodFormatterBuilder()
 +        .appendMinutes().appendSuffix("m"," min")
 +        .appendSeconds().appendSuffix("s"," sec")
 +        .appendMillis().toFormatter();
 +    Period p = Period.parse( s, f );
 +    return p.toStandardDuration().getMillis();
 +  }
 +
 +}
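The useTwoWaySsl branch above assembles the client SSLContext from the gateway identity keystore; the following standalone sketch shows the same HttpClient pattern, with a placeholder keystore path and passwords standing in for the KeystoreService/AliasService/MasterService lookups.

import java.io.FileInputStream;
import java.io.InputStream;
import java.security.KeyStore;

import javax.net.ssl.SSLContext;

import org.apache.http.client.HttpClient;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.ssl.SSLContexts;

public class TwoWaySslClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder keystore location and passwords; the factory above resolves these
    // through the gateway services instead.
    char[] storePass = "keystore-password".toCharArray();
    char[] keyPass = "key-password".toCharArray();

    KeyStore keystore = KeyStore.getInstance("JKS");
    try (InputStream in = new FileInputStream("/path/to/gateway.jks")) {
      keystore.load(in, storePass);
    }

    // Same trust/key material calls as the patched DefaultHttpClientFactory.
    SSLContext sslContext = SSLContexts.custom()
        .loadTrustMaterial(keystore, new TrustSelfSignedStrategy())
        .loadKeyMaterial(keystore, keyPass)
        .build();

    HttpClient client = HttpClients.custom()
        .setSSLSocketFactory(new SSLConnectionSocketFactory(sslContext))
        .build();
    System.out.println("Client ready: " + (client != null));
  }
}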
index 27a1905,0000000..42d69d9
mode 100644,000000..100644
--- /dev/null
@@@ -1,91 -1,0 +1,94 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.i18n;
 +
 +import org.apache.knox.gateway.i18n.messages.Message;
 +import org.apache.knox.gateway.i18n.messages.MessageLevel;
 +import org.apache.knox.gateway.i18n.messages.Messages;
 +import org.apache.knox.gateway.i18n.messages.StackTrace;
 +
 +@Messages(logger="org.apache.knox.gateway")
 +public interface GatewaySpiMessages {
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to load the internal principal mapping table: {0}" )
 +  void failedToLoadPrincipalMappingTable( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to execute filter: {0}" )
 +  void failedToExecuteFilter( @StackTrace( level = MessageLevel.DEBUG ) Throwable t );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt passphrase: {0}" )
 +  void failedToEncryptPassphrase( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to generate secret key from password: {0}" )
 +  void failedToGenerateKeyFromPassword( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to create keystore [filename={0}, type={1}]: {2}" )
 +  void failedToCreateKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to load keystore [filename={0}, type={1}]: {2}" )
 +  void failedToLoadKeystore( String fileName, String keyStoreType, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add credential: {1}" )
 +  void failedToAddCredential( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to remove credential: {1}")
 +  void failedToRemoveCredential(@StackTrace(level = MessageLevel.DEBUG) Exception e);
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get credential: {1}" )
 +  void failedToGetCredential(@StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +  
 +  @Message( level = MessageLevel.ERROR, text = "Failed to persist master secret: {0}" )
 +  void failedToPersistMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to encrypt master secret: {0}" )
 +  void failedToEncryptMasterSecret( @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to initialize master service from persistent master {0}: {1}" )
 +  void failedToInitializeFromPersistentMaster( String masterFileName, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to add self signed certificate for Gateway {0}: {1}" )
 +  void failedToAddSeflSignedCertForGateway( String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Failed to get key {0}: {1}" )
 +  void failedToGetKey(String alias, @StackTrace( level = MessageLevel.DEBUG ) Exception e);
 +
 +  @Message( level = MessageLevel.DEBUG, text = "Loading from persistent master: {0}" )
 +  void loadingFromPersistentMaster( String tag );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "ALIAS: {0}" )
 +  void printClusterAlias( String alias );
 +
 +  @Message( level = MessageLevel.DEBUG, text = "MASTER SERVICE == NULL: {0}" )
 +  void printMasterServiceIsNull( boolean masterServiceIsNull );
 +
 +  @Message( level = MessageLevel.ERROR, text = "Gateway has failed to start. Unable to prompt user for master secret setup. Please consider using knoxcli.sh create-master" )
 +  void unableToPromptForMasterUseKnoxCLI();
 +
 +  @Message( level = MessageLevel.ERROR, text = "Error in generating certificate: {0}" )
 +  void failedToGenerateCertificate( @StackTrace( level = MessageLevel.ERROR ) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Failed to read configuration: {0}")
 +  void failedToReadConfigurationFile(final String filePath, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
 +  @Message(level = MessageLevel.ERROR, text = "Invalid resource URI {0} : {1}")
 +  void invalidResourceURI(final String uri, final String reason, @StackTrace(level = MessageLevel.DEBUG) Exception e );
 +
++  @Message( level = MessageLevel.ERROR, text = "Topology {0} cannot be manually overwritten because it was generated from a simple descriptor." )
++  void disallowedOverwritingGeneratedTopology(final String topologyName);
++
 +}
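The new disallowedOverwritingGeneratedTopology message pairs with the generated flag introduced on Topology. A hypothetical guard sketch of how a caller might emit it, assuming the MessagesFactory lookup used for the other message bundles; the real enforcement lives in the topology service, not in this interface.

import org.apache.knox.gateway.i18n.GatewaySpiMessages;
import org.apache.knox.gateway.i18n.messages.MessagesFactory;

public class GeneratedTopologyGuardSketch {
  private static final GatewaySpiMessages LOG = MessagesFactory.get(GatewaySpiMessages.class);

  // Hypothetical guard: refuse to overwrite a topology that was generated from a simple descriptor.
  static boolean canOverwrite(org.apache.knox.gateway.topology.Topology existing) {
    if (existing != null && existing.isGenerated()) {
      LOG.disallowedOverwritingGeneratedTopology(existing.getName());
      return false;
    }
    return true;
  }
}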
index 815c218,0000000..e46197d
mode 100644,000000..100644
--- /dev/null
@@@ -1,151 -1,0 +1,160 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.topology;
 +
 +import org.apache.commons.collections.map.HashedMap;
 +import org.apache.commons.collections.map.MultiKeyMap;
 +
 +import java.net.URI;
 +import java.util.ArrayList;
 +import java.util.Collection;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +public class Topology {
 +
 +  private URI uri;
 +  private String name;
 +  private String defaultServicePath = null;
 +  private long timestamp;
++  private boolean isGenerated;
 +  public List<Provider> providerList = new ArrayList<Provider>();
 +  private Map<String,Map<String,Provider>> providerMap = new HashMap<>();
 +  public List<Service> services = new ArrayList<Service>();
 +  private MultiKeyMap serviceMap;
 +  private List<Application> applications = new ArrayList<Application>();
 +  private Map<String,Application> applicationMap = new HashMap<>();
 +
 +  public Topology() {
 +    serviceMap = MultiKeyMap.decorate(new HashedMap());
 +  }
 +
 +  public URI getUri() {
 +    return uri;
 +  }
 +
 +  public void setUri( URI uri ) {
 +    this.uri = uri;
 +  }
 +
 +  public String getName() {
 +    return name;
 +  }
 +
 +  public void setName( String name ) {
 +    this.name = name;
 +  }
 +
 +  public long getTimestamp() {
 +    return timestamp;
 +  }
 +
 +  public void setTimestamp( long timestamp ) {
 +    this.timestamp = timestamp;
 +  }
 +
 +  public String getDefaultServicePath() {
 +    return defaultServicePath;
 +  }
 +
 +  public void setDefaultServicePath(String servicePath) {
 +    defaultServicePath = servicePath;
 +  }
 +
++  public void setGenerated(boolean isGenerated) {
++    this.isGenerated = isGenerated;
++  }
++
++  public boolean isGenerated() {
++    return isGenerated;
++  }
++
 +  public Collection<Service> getServices() {
 +    return services;
 +  }
 +
 +  public Service getService( String role, String name, Version version) {
 +    return (Service)serviceMap.get(role, name, version);
 +  }
 +
 +  public void addService( Service service ) {
 +    services.add( service );
 +    serviceMap.put(service.getRole(), service.getName(), service.getVersion(), service);
 +  }
 +
 +  public Collection<Application> getApplications() {
 +    return applications;
 +  }
 +
 +  private static String fixApplicationUrl( String url ) {
 +    if( url == null ) {
 +      url = "/";
 +    }
 +    if( !url.startsWith( "/" ) ) {
 +      url = "/" + url;
 +    }
 +    return url;
 +  }
 +
 +  public Application getApplication(String url) {
 +    return applicationMap.get( fixApplicationUrl( url ) );
 +  }
 +
 +  public void addApplication( Application application ) {
 +    applications.add( application );
 +    List<String> urls = application.getUrls();
 +    if( urls == null || urls.isEmpty() ) {
 +      applicationMap.put( fixApplicationUrl( application.getName() ), application );
 +    } else {
 +      for( String url : application.getUrls() ) {
 +        applicationMap.put( fixApplicationUrl( url ), application );
 +      }
 +    }
 +  }
 +
 +  public Collection<Provider> getProviders() {
 +    return providerList;
 +  }
 +
 +  public Provider getProvider( String role, String name ) {
 +    Provider provider = null;
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap != null) { 
 +      if( name != null ) {
 +        provider = nameMap.get( name );
 +      }
 +      else {
 +        provider = (Provider) nameMap.values().toArray()[0];
 +      }
 +    }
 +    return provider;
 +  }
 +
 +  public void addProvider( Provider provider ) {
 +    providerList.add( provider );
 +    String role = provider.getRole();
 +    Map<String,Provider> nameMap = providerMap.get( role );
 +    if( nameMap == null ) {
 +      nameMap = new HashMap<>();
 +      providerMap.put( role, nameMap );
 +    }
 +    nameMap.put( provider.getName(), provider );
 +  }
 +
 +}
index c9f262b,0000000..8208e4f
mode 100644,000000..100644
--- /dev/null
@@@ -1,658 -1,0 +1,663 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.knox.gateway.deploy;
 +
 +import java.io.File;
 +import java.io.IOException;
 +import java.net.URISyntaxException;
 +import java.net.URL;
 +import java.util.Arrays;
 +import java.util.Enumeration;
 +import java.util.HashMap;
 +import java.util.Map;
 +import java.util.Set;
 +import java.util.UUID;
 +import javax.xml.parsers.ParserConfigurationException;
 +import javax.xml.transform.TransformerException;
 +import javax.xml.xpath.XPathConstants;
 +import javax.xml.xpath.XPathExpressionException;
 +import javax.xml.xpath.XPathFactory;
 +
 +import org.apache.commons.io.FileUtils;
 +import org.apache.knox.gateway.GatewayTestConfig;
 +import org.apache.knox.gateway.config.GatewayConfig;
 +import org.apache.knox.gateway.filter.XForwardedHeaderFilter;
 +import org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter;
 +import org.apache.knox.gateway.services.DefaultGatewayServices;
 +import org.apache.knox.gateway.services.ServiceLifecycleException;
 +import org.apache.knox.gateway.topology.Application;
 +import org.apache.knox.gateway.topology.Param;
 +import org.apache.knox.gateway.topology.Provider;
 +import org.apache.knox.gateway.topology.Service;
 +import org.apache.knox.gateway.topology.Topology;
 +import org.apache.knox.gateway.util.XmlUtils;
 +import org.apache.knox.test.TestUtils;
 +import org.apache.knox.test.log.NoOpAppender;
 +import org.apache.log4j.Appender;
 +import org.jboss.shrinkwrap.api.Archive;
 +import org.jboss.shrinkwrap.api.ArchivePath;
 +import org.jboss.shrinkwrap.api.spec.EnterpriseArchive;
 +import org.jboss.shrinkwrap.api.spec.WebArchive;
 +import org.junit.Test;
 +import org.w3c.dom.Document;
 +import org.w3c.dom.Node;
 +import org.xml.sax.SAXException;
 +
 +import static org.apache.knox.test.TestUtils.LOG_ENTER;
 +import static org.apache.knox.test.TestUtils.LOG_EXIT;
 +import static org.hamcrest.CoreMatchers.is;
 +import static org.hamcrest.CoreMatchers.notNullValue;
 +import static org.hamcrest.CoreMatchers.nullValue;
 +import static org.hamcrest.MatcherAssert.assertThat;
 +import static org.hamcrest.core.IsEqual.equalTo;
 +import static org.hamcrest.core.IsNot.not;
 +import static org.hamcrest.xml.HasXPath.hasXPath;
 +import static org.junit.Assert.fail;
 +
 +public class DeploymentFactoryFuncTest {
 +
 +  private static final long SHORT_TIMEOUT = 1000L;
 +  private static final long MEDIUM_TIMEOUT = 5 * SHORT_TIMEOUT;
 +  private static final long LONG_TIMEOUT = 10 * MEDIUM_TIMEOUT;
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testGenericProviderDeploymentContributor() throws ParserConfigurationException, SAXException, IOException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    //    ((GatewayTestConfig) config).setDeploymentDir( "clusters" );
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    Provider provider = new Provider();
 +    provider.setRole( "federation" );
 +    provider.setName( "HeaderPreAuth" );
 +    provider.setEnabled( true );
 +    Param param = new Param();
 +    param.setName( "filter" );
 +    param.setValue( "org.opensource.ExistingFilter" );
 +    provider.addParam( param );
 +    param = new Param();
 +    param.setName( "test-param-name" );
 +    param.setValue( "test-param-value" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +
 +    Document gateway = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    //dump( gateway );
 +
 +    //by default the first filter will be the X-Forwarded header filter
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/role", equalTo( "xforwardedheaders" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/name", equalTo( "XForwardedHeaderFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.XForwardedHeaderFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/role", equalTo( "federation" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/name", equalTo( "HeaderPreAuth" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/class", equalTo( "org.apache.knox.gateway.preauth.filter.HeaderPreAuthFederationFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[1]/name", equalTo( "filter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[1]/value", equalTo( "org.opensource.ExistingFilter" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[2]/name", equalTo( "test-param-name" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/param[2]/value", equalTo( "test-param-value" ) ) );
++
++    // Verify that the missing identity-assertion provider is added automatically, since it isn't explicitly configured above
++    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/role", equalTo( "identity-assertion" ) ) );
++    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/name", equalTo( "Default" ) ) );
++
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testInvalidGenericProviderDeploymentContributor() throws ParserConfigurationException, SAXException, IOException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    Provider provider = new Provider();
 +    provider.setRole( "authentication" );
 +    provider.setName( "generic" );
 +    provider.setEnabled( true );
 +    Param param; // = new ProviderParam();
 +    // Missing filter param.
 +    //param.setName( "filter" );
 +    //param.setValue( "org.opensource.ExistingFilter" );
 +    //provider.addParam( param );
 +    param = new Param();
 +    param.setName( "test-param-name" );
 +    param.setValue( "test-param-value" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +
 +    Enumeration<Appender> appenders = NoOpAppender.setUp();
 +    try {
 +      DeploymentFactory.createDeployment( config, topology );
 +      fail( "Should have thrown DeploymentException" );
 +    } catch ( DeploymentException e ) {
 +      // Expected.
 +    } finally {
 +      NoOpAppender.tearDown( appenders );
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testSimpleTopology() throws IOException, SAXException, ParserConfigurationException, URISyntaxException, TransformerException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    //Testing without x-forwarded headers filter
 +    ((GatewayTestConfig)config).setXForwardedEnabled(false);
 +    File targetDir = new File( System.getProperty( "user.dir" ), "target" );
 +    File gatewayDir = new File( targetDir, "gateway-home-" + UUID.randomUUID() );
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir( gatewayDir.getAbsolutePath() );
 +    File deployDir = new File( config.getGatewayDeploymentDir() );
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String,String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/webhdfs" );
 +    topology.addService( service );
 +    Provider provider = new Provider();
 +    provider.setRole( "authentication" );
 +    provider.setName( "ShiroProvider" );
 +    provider.setEnabled( true );
 +    Param param = new Param();
 +    param.setName( "contextConfigLocation" );
 +    param.setValue( "classpath:app-context-security.xml" );
 +    provider.addParam( param );
 +    topology.addProvider( provider );
 +    Provider asserter = new Provider();
 +    asserter.setRole( "identity-assertion" );
 +    asserter.setName("Default");
 +    asserter.setEnabled( true );
 +    topology.addProvider( asserter );
 +    Provider authorizer = new Provider();
 +    authorizer.setRole( "authorization" );
 +    authorizer.setName("AclsAuthz");
 +    authorizer.setEnabled( true );
 +    topology.addProvider( authorizer );
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +    //    File dir = new File( System.getProperty( "user.dir" ) );
 +    //    File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
 +
 +    Document web = XmlUtils.readXml( war.get( "%2F/WEB-INF/web.xml" ).getAsset().openStream() );
 +    assertThat( web, hasXPath( "/web-app" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-name" ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-name", equalTo( "test-cluster-knox-gateway-servlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/servlet-class", equalTo( "org.apache.knox.gateway.GatewayServlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/init-param/param-name", equalTo( "gatewayDescriptorLocation" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet/init-param/param-value", equalTo( "/WEB-INF/gateway.xml" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet-mapping/servlet-name", equalTo( "test-cluster-knox-gateway-servlet" ) ) );
 +    assertThat( web, hasXPath( "/web-app/servlet-mapping/url-pattern", equalTo( "/*" ) ) );
 +
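 +    // Each WEBHDFS resource should carry the full filter chain in order: authentication, rewrite, identity-assertion, authorization, dispatch.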
 +    Document gateway = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/pattern", equalTo( "/webhdfs/v1/?**" ) ) );
 +    //assertThat( gateway, hasXPath( "/gateway/resource[1]/target", equalTo( "http://localhost:50070/webhdfs/v1/?{**}" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.ResponseCookieFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[2]/class", equalTo( "org.apache.shiro.web.servlet.ShiroFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[3]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[3]/class", equalTo( "org.apache.knox.gateway.filter.ShiroSubjectIdentityAdapter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/role", equalTo( "rewrite" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[4]/class", equalTo( "org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[5]/role", equalTo( "identity-assertion" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[5]/class", equalTo( "org.apache.knox.gateway.identityasserter.filter.IdentityAsserterFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/role", equalTo( "authorization" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/name", equalTo( "AclsAuthz" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[6]/class", equalTo( "org.apache.knox.gateway.filter.AclsAuthorizationFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/role", equalTo( "dispatch" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/name", equalTo( "webhdfs" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[1]/filter[7]/class", equalTo( "org.apache.knox.gateway.dispatch.GatewayDispatchFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/pattern", equalTo( "/webhdfs/v1/**?**" ) ) );
 +    //assertThat( gateway, hasXPath( "/gateway/resource[2]/target", equalTo( "http://localhost:50070/webhdfs/v1/{path=**}?{**}" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[1]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[1]/class", equalTo( "org.apache.knox.gateway.filter.ResponseCookieFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[2]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[2]/class", equalTo( "org.apache.shiro.web.servlet.ShiroFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[3]/role", equalTo( "authentication" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[3]/class", equalTo( "org.apache.knox.gateway.filter.ShiroSubjectIdentityAdapter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[4]/role", equalTo( "rewrite" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[4]/class", equalTo( "org.apache.knox.gateway.filter.rewrite.api.UrlRewriteServletFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[5]/role", equalTo( "identity-assertion" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[5]/class", equalTo( "org.apache.knox.gateway.identityasserter.filter.IdentityAsserterFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/role", equalTo( "authorization" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/name", equalTo( "AclsAuthz" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[6]/class", equalTo( "org.apache.knox.gateway.filter.AclsAuthorizationFilter" ) ) );
 +
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/role", equalTo( "dispatch" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/name", equalTo( "webhdfs" ) ) );
 +    assertThat( gateway, hasXPath( "/gateway/resource[2]/filter[7]/class", equalTo( "org.apache.knox.gateway.dispatch.GatewayDispatchFilter" ) ) );
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testWebXmlGeneration() throws IOException, SAXException, ParserConfigurationException, URISyntaxException {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName("test-cluster");
 +    Service service = new Service();
 +    service.setRole("WEBHDFS");
 +    service.addUrl("http://localhost:50070/webhdfs");
 +    topology.addService(service);
 +    Provider provider = new Provider();
 +    provider.setRole("authentication");
 +    provider.setName("ShiroProvider");
 +    provider.setEnabled(true);
 +    Param param = new Param();
 +    param.setName("contextConfigLocation");
 +    param.setValue("classpath:app-context-security.xml");
 +    provider.addParam(param);
 +    topology.addProvider(provider);
 +    Provider asserter = new Provider();
 +    asserter.setRole("identity-assertion");
 +    asserter.setName("Default");
 +    asserter.setEnabled(true);
 +    topology.addProvider(asserter);
 +    Provider authorizer = new Provider();
 +    authorizer.setRole("authorization");
 +    authorizer.setName("AclsAuthz");
 +    authorizer.setEnabled(true);
 +    topology.addProvider(authorizer);
 +    Provider ha = new Provider();
 +    ha.setRole("ha");
 +    ha.setName("HaProvider");
 +    ha.setEnabled(true);
 +    topology.addProvider(ha);
 +
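 +    // Deploy the same topology repeatedly; each iteration re-validates the generated web.xml, in particular the listener ordering.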
 +    for (int i = 0; i < 10; i++) {
 +      createAndTestDeployment(config, topology);
 +    }
 +    LOG_EXIT();
 +  }
 +
 +  private void createAndTestDeployment(GatewayConfig config, Topology topology) throws IOException, SAXException, ParserConfigurationException {
 +
 +    EnterpriseArchive war = DeploymentFactory.createDeployment(config, topology);
 +    //      File dir = new File( System.getProperty( "user.dir" ) );
 +    //      File file = war.as( ExplodedExporter.class ).exportExploded( dir, "test-cluster.war" );
 +
 +    Document web = XmlUtils.readXml(war.get("%2F/WEB-INF/web.xml").getAsset().openStream());
 +    assertThat(web, hasXPath("/web-app/servlet/servlet-class", equalTo("org.apache.knox.gateway.GatewayServlet")));
 +    assertThat(web, hasXPath("/web-app/servlet/init-param/param-name", equalTo("gatewayDescriptorLocation")));
 +    assertThat(web, hasXPath("/web-app/servlet/init-param/param-value", equalTo("/WEB-INF/gateway.xml")));
 +    assertThat(web, hasXPath("/web-app/servlet-mapping/servlet-name", equalTo("test-cluster-knox-gateway-servlet")));
 +    assertThat(web, hasXPath("/web-app/servlet-mapping/url-pattern", equalTo("/*")));
 +    //testing the order of listener classes generated
 +    assertThat(web, hasXPath("/web-app/listener[2]/listener-class", equalTo("org.apache.knox.gateway.services.GatewayServicesContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[3]/listener-class", equalTo("org.apache.knox.gateway.services.GatewayMetricsServletContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[4]/listener-class", equalTo("org.apache.knox.gateway.ha.provider" +
 +        ".HaServletContextListener")));
 +    assertThat(web, hasXPath("/web-app/listener[5]/listener-class", equalTo("org.apache.knox.gateway.filter" +
 +        ".rewrite.api.UrlRewriteServletContextListener")));
 +  }
 +
 +  @Test( timeout = LONG_TIMEOUT )
 +  public void testDeploymentWithServiceParams() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Service service;
 +    Param param;
 +    Topology topology = new Topology();
 +    topology.setName( "test-cluster" );
 +
 +    service = new Service();
 +    service.setRole( "HIVE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hive-host:50001/" } ) );
 +    param = new Param();
 +    param.setName( "someparam" );
 +    param.setValue( "somevalue" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
 +    service = new Service();
 +    service.setRole( "WEBHBASE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hbase-host:50002/" } ) );
 +    param = new Param();
 +    param.setName( "replayBufferSize" );
 +    param.setValue( "33" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
 +    service = new Service();
 +    service.setRole( "OOZIE" );
 +    service.setUrls( Arrays.asList( new String[]{ "http://hbase-host:50003/" } ) );
 +    param = new Param();
 +    param.setName( "otherparam" );
 +    param.setValue( "65" );
 +    service.addParam( param );
 +    topology.addService( service );
 +
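 +    // Each service-level param should appear as a param on that service's dispatch filter in the generated gateway.xml.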
 +    EnterpriseArchive war = DeploymentFactory.createDeployment( config, topology );
 +    Document doc = XmlUtils.readXml( war.get( "%2F/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    //    dump( doc );
 +
 +    Node resourceNode, filterNode, paramNode;
 +    String value;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='HIVE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='someparam']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "somevalue" ) ) ;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='WEBHBASE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='replayBufferSize']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "33" ) ) ;
 +
 +    resourceNode = node( doc, "gateway/resource[role/text()='OOZIE']" );
 +    assertThat( resourceNode, is(not(nullValue())));
 +    filterNode = node( resourceNode, "filter[role/text()='dispatch']" );
 +    assertThat( filterNode, is(not(nullValue())));
 +    paramNode = node( filterNode, "param[name/text()='otherparam']" );
 +    value = value( paramNode, "value/text()" );
 +    assertThat( value, is( "65" ) ) ;
 +
 +    FileUtils.deleteQuietly( deployDir );
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testDeploymentWithApplication() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +    URL serviceUrl = TestUtils.getResourceUrl( DeploymentFactoryFuncTest.class, "test-apps/minimal-test-app/service.xml" );
 +    File serviceFile = new File( serviceUrl.toURI() );
 +    File appsDir = serviceFile.getParentFile().getParentFile();
 +    ((GatewayTestConfig)config).setGatewayApplicationsDir(appsDir.getAbsolutePath());
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-topology" );
 +
 +    Application app;
 +
 +    app = new Application();
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path" );
 +    topology.addApplication( app );
 +
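 +    // The application should be deployed under its own context path, with the x-forwarded-headers filter ahead of the rewrite filter.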
 +    EnterpriseArchive archive = DeploymentFactory.createDeployment( config, topology );
 +    assertThat( archive, notNullValue() );
 +
 +    Document doc;
 +
 +    doc = XmlUtils.readXml( archive.get( "META-INF/topology.xml" ).getAsset().openStream() );
 +    assertThat( doc, notNullValue() );
 +
 +    doc = XmlUtils.readXml( archive.get( "%2Fminimal-test-app-path/WEB-INF/gateway.xml" ).getAsset().openStream() );
 +    assertThat( doc, notNullValue() );
 +    //dump( doc );
 +    assertThat( doc, hasXPath("/gateway/resource/pattern", equalTo("/**?**")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/role", equalTo("xforwardedheaders")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/name", equalTo("XForwardedHeaderFilter")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[1]/class", equalTo(XForwardedHeaderFilter.class.getName())));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/role", equalTo("rewrite")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/name", equalTo("url-rewrite")));
 +    assertThat( doc, hasXPath("/gateway/resource/filter[2]/class", equalTo(UrlRewriteServletFilter.class.getName())));
 +
 +    LOG_EXIT();
 +  }
 +
 +  @Test( timeout = MEDIUM_TIMEOUT )
 +  public void testDeploymentWithServicesAndApplications() throws Exception {
 +    LOG_ENTER();
 +    GatewayConfig config = new GatewayTestConfig();
 +    File targetDir = new File(System.getProperty("user.dir"), "target");
 +    File gatewayDir = new File(targetDir, "gateway-home-" + UUID.randomUUID());
 +    gatewayDir.mkdirs();
 +    ((GatewayTestConfig) config).setGatewayHomeDir(gatewayDir.getAbsolutePath());
 +    File deployDir = new File(config.getGatewayDeploymentDir());
 +    deployDir.mkdirs();
 +    URL serviceUrl = TestUtils.getResourceUrl( DeploymentFactoryFuncTest.class, "test-apps/minimal-test-app/service.xml" );
 +    File serviceFile = new File( serviceUrl.toURI() );
 +    File appsDir = serviceFile.getParentFile().getParentFile();
 +    ((GatewayTestConfig)config).setGatewayApplicationsDir(appsDir.getAbsolutePath());
 +
 +    DefaultGatewayServices srvcs = new DefaultGatewayServices();
 +    Map<String, String> options = new HashMap<>();
 +    options.put("persist-master", "false");
 +    options.put("master", "password");
 +    try {
 +      DeploymentFactory.setGatewayServices(srvcs);
 +      srvcs.init(config, options);
 +    } catch (ServiceLifecycleException e) {
 +      e.printStackTrace(); // I18N not required.
 +    }
 +
 +    Topology topology = new Topology();
 +    topology.setName( "test-topology" );
 +
 +    Application app;
 +
 +    topology.setName( "test-cluster" );
 +    Service service = new Service();
 +    service.setRole( "WEBHDFS" );
 +    service.addUrl( "http://localhost:50070/test-service-url" );
 +    topology.addService( service );
 +
 +    app = new Application();
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path-one" );
 +    topology.addApplication( app );
 +
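 +    // Note: the same Application instance is registered again under a second context path.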
 +    app.setName( "minimal-test-app" );
 +    app.addUrl( "/minimal-test-app-path-two" );
 +    topology.addApplication( app );
 +
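 +    // The archive should contain the topology descriptor, a root WAR for the service, and one WAR per application path.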
 +    EnterpriseArchive archive = DeploymentFactory.createDeployment( config, topology );
 +    assertThat( archive, notNullValue() );
 +
 +    Document doc;
 +    org.jboss.shrinkwrap.api.Node node;
 +
 +    node = archive.get( "META-INF/topology.xml" );
 +    assertThat( "Find META-INF/topology.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse META-INF/topology.xml", doc, notNullValue() );
 +
 +    node = archive.get( "%2F" );
 +    assertThat( "Find %2F", node, notNullValue() );
 +    node = archive.get( "%2F/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2F/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2F/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    WebArchive war = archive.getAsType( WebArchive.class, "%2Fminimal-test-app-path-one" );
 +    assertThat( "Find %2Fminimal-test-app-path-one", war, notNullValue() );
 +    node = war.get( "/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2Fminimal-test-app-path-one/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2Fminimal-test-app-path-one/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    war = archive.getAsType( WebArchive.class, "%2Fminimal-test-app-path-two" );
 +    assertThat( "Find %2Fminimal-test-app-path-two", war, notNullValue() );
 +    node = war.get( "/WEB-INF/gateway.xml" );
 +    assertThat( "Find %2Fminimal-test-app-path-two/WEB-INF/gateway.xml", node, notNullValue() );
 +    doc = XmlUtils.readXml( node.getAsset().openStream() );
 +    assertThat( "Parse %2Fminimal-test-app-path-two/WEB-INF/gateway.xml", doc, notNullValue() );
 +
 +    LOG_EXIT();
 +  }
 +
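 +  // XPath convenience helpers and ShrinkWrap archive dump utilities used for assertions and manual debugging.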
 +  private Node node( Node scope, String expression ) throws XPathExpressionException {
 +    return (Node)XPathFactory.newInstance().newXPath().compile( expression ).evaluate( scope, XPathConstants.NODE );
 +  }
 +
 +  private String value( Node scope, String expression ) throws XPathExpressionException {
 +    return XPathFactory.newInstance().newXPath().compile( expression ).evaluate( scope );
 +  }
 +
 +  private static void dump( org.jboss.shrinkwrap.api.Node node, String prefix ) {
 +    System.out.println( prefix + ": " + node.getPath() );
 +    Set<org.jboss.shrinkwrap.api.Node> children = node.getChildren();
 +    if( children != null && !children.isEmpty() ) {
 +      for( org.jboss.shrinkwrap.api.Node child : children ) {
 +        dump( child, prefix + "    " );
 +      }
 +    }
 +  }
 +
 +  private static void dump( Archive archive ) {
 +    Map<ArchivePath,org.jboss.shrinkwrap.api.Node> content = archive.getContent();
 +    for( Map.Entry<ArchivePath,org.jboss.shrinkwrap.api.Node> entry : content.entrySet() ) {
 +      dump( entry.getValue(), "    " );
 +    }
 +  }
 +
 +}
Simple merge
diff --cc pom.xml
Simple merge