Module tsp.tsp_hk

TSP - Held and Karp (Lagrangian) lower bound for the symmetric TSP based on 1-trees.

Source code
"""
    TSP - Held and Karp (Lagrangian) lower bound for the symmetric TSP
          based on 1-trees.
"""
import networkx as nx
import numpy as np
from tsp.tsp_trees import get1Tree 
      
#---------------------------------------------------------------------

def LR_1tree ( problem, G=None, silent=True ):
    """
    Implements the Lagrangian 1-tree lower bound obtained by relaxing the
    degree constraints in a Lagrangian manner and (approximately) solving
    the Lagrangian dual by means of subgradient optimization.
    
    Parameters:
        **problem** : object  
            TSP problem instance as returned by function load_problem of
            package tsplib95  
        **G** : networkx.Graph, optional  
            If not None, the networkx graph of the problem. Default=None.  
        **silent** : bool, optional  
            If False, information about the computations is printed.
            Default=True.

    Returns:
        **lowBnd** : float  
            The best Lagrangian lower bound found.  
        **T_best** : list of pairs of int  
            The 1-tree (edge list) obtained when solving the Lagrangian
            subproblem at the best multipliers found.  
        **best_w** : numpy array of float  
            The Lagrangian multiplier values that yielded the best bound.
    """     
    # If no graph passed as argument, obtain the graph associated with the TSP
    G_local = G is None 
    if G_local:
        G = problem.get_graph()
        G.remove_edges_from(nx.selfloop_edges(G))
        
    # Introduce the modified edge lengths as additional edge attribute
    for e in G.edges: 
        G[e[0]][e[1]]['mweight'] = G[e[0]][e[1]]['weight']
        
    # Assign each node a sequential number (first node gets number 0)
    for i, node in enumerate( G.nodes ):
        G.nodes[node]['num'] = i
    
    # Let the node of smallest index be the special node of the 1-tree
    # (spanning tree on the remaining nodes plus two edges incident to it)
    k = min ( node for node in G.nodes )

    # Initialize current and best Lagrangian multiplier values
    lowBnd = 0.0
    n = len(G)
    best_w = np.zeros(n, dtype=float ) # best Lagrangian multipliers
    cur_w = np.zeros(n, dtype=float )  # current Lagrangian multipliers
    T_best = []
    
    # Set iteration limit; initialize the step length and its geometric
    # reduction factor
    iter_max = 20*n
    lam_para = 0.999   # per-iteration shrink factor for the step length
    stop = False
    step = 2.0         # initial step length
    itera = 0
    
    # subgradient in previous iteration
    sg_prev = np.zeros( n, dtype=float )
    
    if not silent:
        print("----------------------------------------")
        print("Iter  Lower_Bound  Best_Bound  Grad.norm")
        print("----------------------------------------")
    
    while not stop:
       
        itera += 1
        
        # Compute the 1-tree for the current multiplier values
        cur_bnd, tree = get1Tree(k, G, elen='mweight', edgeLst = False )
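        # Lagrangian bound: 1-tree length under the modified edge lengths
        # minus twice the sum of the multipliers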
        cur_bnd -= 2*np.sum( cur_w )
        
        # Obtain the subgradient (node degree in the 1-tree minus 2) and its
        # norm; iterate over G.nodes so the ordering matches the multiplier
        # indexing by the 'num' attribute
        sg = np.array( [ tree.degree(node) for node in G.nodes ] ) - 2.0
        nrm = np.linalg.norm( sg )
        
        # Check for bound improvement
        if cur_bnd > lowBnd:
            lowBnd = cur_bnd
            best_w = np.copy( cur_w )
            T_best = [ (e[0],e[1]) for e in tree.edges ]
            
        # A zero subgradient means every node has degree 2, i.e. the 1-tree
        # is a tour, so the bound is optimal
        if nrm < 1.0E-4: break
          
        # Apply subgradient step
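        # alpha is 1.0 in the first iteration (pure subgradient step); later
        # iterations deflect the direction: 70% current subgradient plus
        # 30% of the previous one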
        alpha = 0.7 + 0.3*(itera < 2 )
        cur_w += step * ( alpha*sg + (1.0-alpha)*sg_prev )
        sg_prev = np.copy( sg )
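        # Shrink the step length geometrically; stop when it becomes
        # negligible or the iteration limit is reached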
        step *= lam_para
        if step < 1.0E-6: break
        if itera >= iter_max: break
        
        # Display info on current iteration
        if not silent:
            print('{0:4d}  {1:11.2f}  {2:10.2f}  {3:9.2f}'.format(itera,cur_bnd,lowBnd,nrm))
        
        # Adjust the modified edge lengths to the new multiplier values
        for e in G.edges:
            i, j = e[0], e[1]
            n_i, n_j = G.nodes[i]['num'], G.nodes[j]['num']
            G[i][j]['mweight'] = G[i][j]['weight'] + cur_w[n_i] + cur_w[n_j]
                                 
    # Subgradient steps finished; if the graph was supplied by the caller,
    # remove the temporary attributes added above
    if not G_local:
        for e in G.edges: del G[e[0]][e[1]]['mweight']
        for i in G.nodes: del G.nodes[i]['num']                                                           
    
    return lowBnd, T_best, best_w

Functions

def LR_1tree(problem, G=None, silent=True)

Implements the Lagrangian 1-tree lower bound obtained by relaxing the degree constraints in a Lagrangian manner and (approximately) solving the Lagrangian dual by means of subgradient optimization.
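
In symbols (matching the implementation): with a multiplier w_i attached to the degree constraint of node i, each iteration solves the Lagrangian subproblem, a minimum 1-tree T under the modified edge lengths c_ij + w_i + w_j, giving the bound

$$ L(w) \;=\; \min_{T} \sum_{(i,j)\in T} \bigl(c_{ij} + w_i + w_j\bigr) \;-\; 2\sum_{i} w_i , $$

and then takes the deflected subgradient step

$$ w_i \;\leftarrow\; w_i + t\,\bigl(\alpha\, g_i + (1-\alpha)\, g_i^{\text{prev}}\bigr), \qquad g_i = \deg_T(i) - 2 , $$

where the step length t starts at 2.0 and shrinks by the factor 0.999 per iteration, and alpha is 1.0 in the first iteration and 0.7 afterwards.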

Parameters

problem : object
TSP problem instance as returned by function load_problem of package tsplib95
G : networkx.Graph, optional
If not None, the networkx graph of the problem. Default=None.
silent : bool, optional
If False, information about the computations is printed. Default=True.

Returns

lowBnd : float
The best Lagrangian lower bound found.
T_best : list of pairs of int
The 1-tree (edge list) obtained when solving the Lagrangian subproblem at the best multipliers found.
best_w : numpy array of float
The Lagrangian multiplier values that yielded the best bound.
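
A minimal usage sketch (the file name 'berlin52.tsp' is only a placeholder for any symmetric TSPLIB instance):

import tsplib95
from tsp.tsp_hk import LR_1tree

# Load a TSPLIB instance and compute the Held-Karp lower bound
problem = tsplib95.load_problem('berlin52.tsp')
lowBnd, T_best, best_w = LR_1tree(problem, silent=False)
print('Held-Karp lower bound: {0:.2f}'.format(lowBnd))
print('Edges in the best 1-tree:', len(T_best))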
