Beruflich Dokumente
Kultur Dokumente
Spanning Tree
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* One weighted edge: endpoint vertex indices src/dest and its weight. */
struct Edge
{
int src, dest, weight;
};
/* Edge-list graph representation: V vertices, E edges stored in the
 * edge array (allocated with E slots by createGraph below). */
struct Graph
{
int V, E;
struct Edge* edge;
};
/*
 * Allocate a graph with V vertices and room for E edges.
 * Returns NULL if either allocation fails (the original cast malloc,
 * used sizeof(type), and dereferenced unchecked results).
 * Caller owns the result: free(graph->edge) then free(graph).
 */
struct Graph* createGraph(int V, int E)
{
    struct Graph* graph = malloc(sizeof *graph);
    if (graph == NULL)
        return NULL;
    graph->V = V;
    graph->E = E;
    graph->edge = malloc((size_t)E * sizeof *graph->edge);
    if (graph->edge == NULL)
    {
        free(graph);
        return NULL;
    }
    return graph;
}
/* Union-find (disjoint set) node: parent index within the subsets array
 * plus a rank used for union-by-rank balancing. */
struct subset
{
int parent;
int rank;
};
{
int xroot = find(subsets, x);
int yroot = find(subsets, y);
if (subsets[xroot].rank < subsets[yroot].rank)
subsets[xroot].parent = yroot;
else if (subsets[xroot].rank > subsets[yroot].rank)
subsets[yroot].parent = xroot;
else
{
subsets[yroot].parent = xroot;
subsets[xroot].rank++;
}
}
int myComp(const void* a, const void* b)
{
struct Edge* a1 = (struct Edge*)a;
struct Edge* b1 = (struct Edge*)b;
return a1->weight > b1->weight;
}
/*
 * Build and print a minimum spanning tree of `graph` with Kruskal's
 * algorithm: sort edges by weight, then greedily take every edge that
 * joins two different union-find components.
 * Fixes over the original: the edge scan is bounds-checked (a disconnected
 * graph made the while loop read past the edge array), and the subsets
 * allocation is freed (it leaked).
 */
void KruskalMST(struct Graph* graph)
{
    int V = graph->V;
    struct Edge result[V];  /* an MST has at most V-1 edges */
    int e = 0;              /* edges accepted so far */
    int i = 0;              /* next candidate edge index */

    /* sort edges in non-decreasing weight order */
    qsort(graph->edge, graph->E, sizeof(graph->edge[0]), myComp);

    struct subset *subsets = malloc(V * sizeof *subsets);
    if (subsets == NULL)
        return;  /* allocation failed: nothing to print */

    /* every vertex starts in its own singleton set */
    for (int v = 0; v < V; ++v)
    {
        subsets[v].parent = v;
        subsets[v].rank = 0;
    }

    /* take the lightest remaining edge whenever it connects two components */
    while (e < V - 1 && i < graph->E)
    {
        struct Edge next_edge = graph->edge[i++];
        int x = find(subsets, next_edge.src);
        int y = find(subsets, next_edge.dest);
        if (x != y)
        {
            result[e++] = next_edge;
            Union(subsets, x, y);
        }
    }
    free(subsets);

    printf("Following are the edges in the constructed MST\n");
    for (i = 0; i < e; ++i)
        printf("%d -- %d == %d\n", result[i].src, result[i].dest,
               result[i].weight);
}
/* Driver: run Kruskal's MST on the 4-vertex / 5-edge example below. */
int main()
{
    /* Example weighted graph:
             10
          0------1
          |  \   |
         6|  5\  |15
          |    \ |
          2------3
             4
    */
    int V = 4;
    int E = 5;
    struct Graph* graph = createGraph(V, E);
    if (graph == NULL)
        return 1;

    /* NOTE(review): the assignments for edges 0-2 and edge[3].src were lost
       in extraction; restored from the diagram above (the standard example:
       0-1:10, 0-2:6, 0-3:5, 1-3:15, 2-3:4). */
    graph->edge[0].src = 0;
    graph->edge[0].dest = 1;
    graph->edge[0].weight = 10;
    /* add edge 0-2 */
    graph->edge[1].src = 0;
    graph->edge[1].dest = 2;
    graph->edge[1].weight = 6;
    /* add edge 0-3 */
    graph->edge[2].src = 0;
    graph->edge[2].dest = 3;
    graph->edge[2].weight = 5;
    /* add edge 1-3 */
    graph->edge[3].src = 1;
    graph->edge[3].dest = 3;
    graph->edge[3].weight = 15;
    /* add edge 2-3 */
    graph->edge[4].src = 2;
    graph->edge[4].dest = 3;
    graph->edge[4].weight = 4;

    KruskalMST(graph);

    free(graph->edge);
    free(graph);
    return 0;
}
// Huffman-tree node: one symbol, its frequency, and two child links.
struct MinHeapNode{
    char data;                  // symbol stored at this node
    unsigned freq;              // occurrence count of the symbol
    MinHeapNode *left, *right;  // children; NULL for a leaf

    // Construct a leaf node; both children start out empty.
    MinHeapNode(char data, unsigned freq)
        : data(data), freq(freq)
    {
        left = NULL;
        right = NULL;
    }
};
struct compare{
bool operator()(MinHeapNode* l, MinHeapNode* r){
printCodes(minHeap.top(), "");
}
// Driver: build and print Huffman codes for a fixed symbol/frequency table.
int main(){
    char symbols[] = { 'a', 'b', 'c', 'd', 'e', 'f' };
    int freqs[] = { 5, 9, 12, 13, 16, 45 };
    int count = sizeof symbols / sizeof symbols[0];

    HuffmanCodes(symbols, freqs, count);
    return 0;
}
2) Assign a distance value to all vertices in the input graph. Initialize all
distance values as INFINITE. Assign distance value as 0 for the source vertex
so that it is picked first.
3) While sptSet doesn't include all vertices
Pick a vertex u which is not there in sptSet and has minimum distance
value.
Include u to sptSet.
#include <stdio.h>
#include <limits.h>
#include <stdbool.h>
#define V 9
/*
 * Return the index of the not-yet-finalized vertex (sptSet[v] == false)
 * with the smallest tentative distance. Both arrays have V entries.
 * Fix: min_index was uninitialized — if every vertex were already in the
 * set the original returned an indeterminate value (UB); it now returns -1
 * in that (normally unreachable) case.
 */
int minDistance(int dist[], bool sptSet[]){
    int min = INT_MAX, min_index = -1;
    for (int v = 0; v < V; v++)
        if (sptSet[v] == false && dist[v] <= min)
            min = dist[v], min_index = v;
    return min_index;
}
int printSolution(int dist[], int n){
printf("Vertex
bool sptSet[V];
for (int i = 0; i < V; i++)
dist[i] = INT_MAX, sptSet[i] = false;
dist[src] = 0;
for (int count = 0; count < V-1; count++){
int u = minDistance(dist, sptSet);
sptSet[u] = true;
for (int v = 0; v < V; v++)
if (!sptSet[v] && graph[u][v] && dist[u] != INT_MAX
&& dist[u]+graph[u][v] < dist[v])
dist[v] = dist[u] + graph[u][v];
}
printSolution(dist, V);
}
/* Driver: run Dijkstra's algorithm from source vertex 0 on a 9-vertex
 * example graph given as a symmetric adjacency matrix (0 = no edge). */
int main(){
int graph[V][V] = {{0, 4, 0, 0, 0, 0, 0, 8, 0},
{4, 0, 8, 0, 0, 0, 0, 11, 0},
{0, 8, 0, 7, 0, 4, 0, 0, 2},
{0, 0, 7, 0, 9, 14, 0, 0, 0},
{0, 0, 0, 9, 0, 10, 0, 0, 0},
{0, 0, 4, 14, 10, 0, 2, 0, 0},
{0, 0, 0, 0, 0, 2, 0, 1, 6},
{8, 11, 0, 0, 0, 0, 1, 0, 7},
{0, 0, 2, 0, 0, 0, 6, 7, 0}
};
dijkstra(graph, 0);
return 0;
}
/* Driver for the activity-selection demo: s[]/f[] are activity start and
 * finish times (already sorted by finish time).
 * NOTE(review): the `int main(){` header (and printMaxActivities itself)
 * was lost in extraction; the function opening is reconstructed around the
 * surviving body. */
int main()
{
    int s[] =
        {1, 3, 0, 5, 8, 5};
    int f[] =
        {2, 4, 6, 7, 9, 9};
    int n = sizeof(s)/sizeof(s[0]);
    printMaxActivities(s, f, n);
    getchar();  /* pause so the console output stays visible */
    return 0;
}
Let the given set of activities be S = {1, 2, 3, …, n}, with the activities
sorted by finish time. The greedy choice is to always pick activity 1. Why
does activity 1 always provide one of the optimal solutions? We can prove it
by showing that if there is another solution B whose first activity is not 1,
then there is also a solution A of the same size whose first activity is 1.
Let the first activity selected by B be k; then A = (B − {k}) ∪ {1} always
exists. (Note that the activities in B are independent and k has the smallest
finish time among them. Since k is not 1, finish(k) >= finish(1).)
Dynamic Programming
Dynamic programming is a method for solving a complex problem by
breaking it down into a collection of simpler sub-problems, solving each of
those sub-problems just once, and storing their solutions — ideally in a
memory-based data structure. The next time the same sub-problem occurs,
instead of recomputing its solution, one simply looks up the previously
computed solution, thereby saving computation time at the expense of a
modest expenditure in storage space. Each of the sub-problem solutions is
indexed in some way, typically based on the values of its input parameters,
so as to facilitate its lookup. The technique of storing solutions to
sub-problems instead of recomputing them is called "memoization".
Dynamic programming algorithms are often used for optimization. A dynamic
programming algorithm will examine the previously solved sub-problems and
will combine their solutions to give the best solution for the given problem. In
comparison, a greedy algorithm treats the solution as some sequence of
steps and picks the locally optimal choice at each step. Using a greedy
algorithm does not guarantee an optimal solution, because picking locally
optimal choices may result in a bad global solution, but it is often faster to
calculate. Fortunately, some greedy algorithms (such as Kruskal's or Prim's
for minimum spanning trees) are proven to lead to the optimal solution. In
addition to finding optimal solutions to some problem, dynamic programming
can also be used for counting the number of solutions, or counting the
number of optimal solutions.
Fibonacci sequence
Checkerboard
Sequence alignment
/* NOTE(review): orphaned tail of the TSP nearest-neighbour helper least(c) —
 * its header and selection loop were lost in extraction, and it references
 * names (min, kmin, nc, global cost) declared in the missing part. What
 * survives: close the scan loops, add the chosen edge weight kmin to the
 * global tour cost when a candidate was found (min != 999 appears to be the
 * "no unvisited neighbour" sentinel — TODO confirm against the lost head),
 * and return the index of the next city nc. Left byte-identical because a
 * safe reconstruction is not possible from this fragment alone. */
}
}
if(min!=999)
cost+=kmin;
return nc;
}
/* Print the total tour cost accumulated in the global `cost`.
 * (Single printf; the emitted text is identical to the two-call original.) */
void put(){
    printf("\n\nMinimum cost:%d", cost);
}
/* Driver for the travelling-salesman nearest-neighbour demo: read the cost
 * matrix (get), walk the tour starting from city 0 (mincost), then print
 * the accumulated total (put).
 * NOTE(review): clrscr()/getch() and `void main` are Turbo C / <conio.h>
 * specific — this translation unit does not build with a standard hosted
 * toolchain as written. */
void main(){
clrscr();
get();
printf("\n\nThe Path is:\n\n");
mincost(0);
put();
getch();
}
/*
 * 0/1 knapsack by bottom-up dynamic programming.
 * W: capacity; wt[]/val[]: weights and values of n items.
 * Returns the maximum total value achievable within capacity W.
 * K[i][w] = best value using only the first i items with capacity w.
 * NOTE(review): the function header, table declaration and the "item fits"
 * branch were lost in extraction; reconstructed to match the surviving tail
 * and the call knapSack(W, wt, val, n) in main.
 */
static int max_int(int a, int b) { return a > b ? a : b; }  /* larger of two ints */

int knapSack(int W, int wt[], int val[], int n)
{
    int K[n+1][W+1];  /* VLA: row 0 / column 0 are the empty base cases */

    for (int i = 0; i <= n; i++){
        for (int w = 0; w <= W; w++){
            if (i == 0 || w == 0)
                K[i][w] = 0;                       /* no items or no room */
            else if (wt[i-1] <= w)
                /* item i fits: take the better of including or skipping it */
                K[i][w] = max_int(val[i-1] + K[i-1][w-wt[i-1]],
                                  K[i-1][w]);
            else
                K[i][w] = K[i-1][w];               /* item i too heavy */
        }
    }
    return K[n][W];
}
/* Driver: best value for a capacity-50 knapsack over three items. */
int main(){
    int values[] = {60, 100, 120};
    int weights[] = {10, 20, 30};
    int capacity = 50;
    int items = sizeof values / sizeof values[0];

    printf("%d", knapSack(capacity, weights, values, items));
    return 0;
}
A* search algorithm solves for single pair shortest path using heuristics
to try to speed up the search.
Johnson's algorithm solves all-pairs shortest paths, and may be faster
than Floyd–Warshall on sparse graphs.
Bellman-Ford Algorithm
Given a graph and a source vertex src in graph, find shortest paths from src
to all vertices in the given graph. The graph may contain negative weight
edges.
Dijkstra's algorithm is a greedy algorithm with time complexity
O(E + V log V) (with the use of a Fibonacci heap). Dijkstra doesn't work for
graphs with negative weight edges, while Bellman-Ford works for such graphs.
Bellman-Ford is also simpler than Dijkstra and suits distributed systems
well. But the time complexity of Bellman-Ford is O(VE), which is more than
Dijkstra's.
Like other dynamic programming problems, the algorithm calculate shortest
paths in bottom-up manner. It first calculates the shortest distances for the
shortest paths which have at-most one edge in the path. Then, it calculates
shortest paths with at most 2 edges, and so on. After the ith iteration of outer
loop, the shortest paths with at most i edges are calculated. There can be
at most |V| − 1 edges in any simple path, which is why the outer loop runs
|V| − 1 times. The idea is: assuming there is no negative weight cycle, if we
have calculated shortest paths with at most i edges, then an iteration over all
edges guarantees to give shortest path with at-most (i+1) edges.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
/* One weighted, directed edge: src -> dest with the given weight
 * (weights may be negative for Bellman-Ford). */
struct Edge{
int src, dest, weight;
};
/* Edge-list graph: V vertices, E edges in the edge array
 * (allocated with E slots by createGraph below). */
struct Graph{
int V, E;
struct Edge* edge;
};
/*
 * Allocate a graph with V vertices and room for E edges.
 * Returns NULL if either allocation fails (the original cast malloc,
 * used sizeof(type), and left allocation results unchecked).
 * Caller owns the result: free(graph->edge) then free(graph).
 */
struct Graph* createGraph(int V, int E){
    struct Graph* graph = malloc(sizeof *graph);
    if (graph == NULL)
        return NULL;
    graph->V = V;
    graph->E = E;
    graph->edge = malloc((size_t)E * sizeof *graph->edge);
    if (graph->edge == NULL){
        free(graph);
        return NULL;
    }
    return graph;
}
= INT_MAX;
dist[src] = 0;
for (int i = 1; i <= V-1; i++){
for (int j = 0; j < E; j++){
int u = graph->edge[j].src;
int v = graph->edge[j].dest;
int weight = graph->edge[j].weight;
if (dist[u] != INT_MAX && dist[u] + weight < dist[v])
dist[v] = dist[u] + weight;
}
}
for (int i = 0; i < E; i++){
int u = graph->edge[i].src;
int v = graph->edge[i].dest;
int weight = graph->edge[i].weight;
if (dist[u] != INT_MAX && dist[u] + weight < dist[v])
graph->edge[6].src = 3;
graph->edge[6].dest = 1;
graph->edge[6].weight = 1;
graph->edge[7].src = 4;
graph->edge[7].dest = 3;
graph->edge[7].weight = -3;
BellmanFord(graph, 0);
return 0;
}
Multistage Graphs
A multistage graph G = (V, E) is a directed graph in which the vertices are
partitioned into K >= 2 disjoint sets Vi, where 1 <= i <= K. In addition, if
(u, v) is an edge in E and u ∈ Vi, then v ∈ Vi+1.
Let c(I,j) be the cost of edge (i,j). The cost of a path from (S to T) is the sum of
costs of the edges on the path. The multistage graph problem is to find the
minimum cost path from S to T. The value on the edges are called the
cost of the edges.
A dynamic programming solution to the multistage graph problem is as
follows :
Let path(i,j) be some specification of the minimal path from vertex j in set i to
vertex t; C(i,j) is the cost of this path; c(j,t) is the weight of the edge from j to
t.
C(i,j) = min{ c(j,l) + C(i+1,l) : (j,l) ∈ E and l ∈ Vi+1 }
int[]
D = new int[n];
int[]
P = new int[k];