Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- package inf102.h20.student;
- import java.util.ArrayList;
- import java.util.Collections;
- import java.util.Comparator;
- import java.util.HashMap;
- import java.util.LinkedList;
- import java.util.List;
- import java.util.PriorityQueue;
- import inf102.h20.graph.Edge;
- import inf102.h20.graph.Graph;
- import inf102.h20.graph.WeightedGraph;
- public class ProblemSolver implements IProblem {
- @Override
- /**
- * Given a weigthed graph, this returns an ArrayList of the MST.
- *
- * Time complexity: If V > E+1, we don't have a tree. If V-1 == E, we already have a MST. Hence, we can assume
- * that we will always have E >> V. Thus, time complexity is O(E log E) from adding things to the PriorityQueue
- */
- public <T, E extends Comparable<E>> ArrayList<Edge<T>> mst(WeightedGraph<T, E> g) {
- //This has to be an ArrayList, because that's what we're expected to return
- ArrayList<Edge<T>> mstEdges = new ArrayList<Edge<T>>();
- //Instead of a PriorityQueue we could use an ArrayList. Because we only add things once,
- //we could sort the ArrayList in the reverse order, and then always remove the last element
- //to retain the O(1) remove complexity. We would still end up with V log V time complexity.
- //Using a PQ does sound more intuitive though.
- //#TODO isn't arraylist faster? Just O(V)
- PriorityQueue<Edge<T>> pq = new PriorityQueue<Edge<T>>(new Comparator<Edge<T>>() {
- public int compare(Edge<T> e1, Edge<T> e2) {
- return g.getWeight(e1).compareTo(g.getWeight(e2));
- }});
- UnionFind<T> UF = new UnionFind<T>(); //Stole this from some dude
- for (T V: g.vertices()) {
- UF.add(V, V); //O(V)
- }
- for (Edge<T> e: g.edges()) { //O(E)
- pq.add(e); //O(log E)
- }
- while (!pq.isEmpty() && mstEdges.size() < g.numVertices()-1) { //O(V)
- Edge<T> e = pq.poll(); //O(1)
- T v = e.a;
- T w = e.b;
- if (UF.compressFind(v) == UF.compressFind(w)) { //O(α(n))
- continue;
- }
- UF.union(w, v); //O(α(n))
- mstEdges.add(e); //O(1)
- }
- return mstEdges;
- }
- /**
- *
- * This is a modified version of https://gist.github.com/autolab2013/8b6035bf014a3f15cdf2
- *
- * The time complexity of both union and compessFind should be O(α(n)), and add should be O(1)
- *
- * @author https://gist.github.com/autolab2013
- *
- * @param <T>
- */
- class UnionFind<T> {
- private HashMap<T, T> parent;
- public UnionFind() {
- parent = new HashMap<>();
- }
- public void add(T n, T p) {
- parent.put(n, p); //O(1)
- }
- public void union(T n1, T n2) {
- T p1 = compressFind(n1);
- T p2 = compressFind(n2);
- if(p1 != p2) {
- parent.put(p1, p2); //O(1)
- }
- }
- public T compressFind(T n) { //It's hard to say, the book just says very very nearly amortized O(1), but
- T p = parent.get(n); //I'll trust wikipedia who says O(α(n)), which is apparently the inverse Ackermann function.
- while(p != parent.get(p)) { //Either way it's really small.
- p = parent.get(p);
- }
- T tmp = null;
- T father = parent.get(n);
- while(father != parent.get(father)) {
- tmp = parent.get(father);
- parent.put(father, p);
- father = tmp;
- }
- parent.put(n, p);
- return p;
- }
- }
- @Override
- /**
- * Time complexity: O(V)
- */
- public <T> T lca(Graph<T> g, T root, T u, T v) {
- List<T> pathU = path(g, root, u, root); //O(V)
- List<T> pathV = path(g, root, v, root); //O(V)
- Collections.reverse(pathU); //O(V)
- Collections.reverse(pathV); //O(V)
- int lcaIndex = 0;
- for (int i = 0; i < Math.min(pathU.size(), pathV.size()); i++) { //O(V)
- if (pathU.get(i).equals(pathV.get(i))) { //O(1)
- lcaIndex = i;
- }
- else break;
- }
- return pathU.get(lcaIndex);
- }
- /**
- * Not an optimal algorithm...
- * Time complexity: O(V)
- * @param <T>
- * @param g
- * @param root
- * @param u
- * @param last
- * @return
- */
- public <T> List<T> path(Graph<T> g, T root, T u, T last) {
- HashMap<T, T> edgeTo = new HashMap<T, T>();
- HashMap<T, Boolean> marked = new HashMap<T, Boolean>();
- for (T V: g.vertices()) { //Collections.fill //O(V)
- marked.put(V, false);
- }
- LinkedList<T> verticies = new LinkedList<T>();
- verticies.add(root); //O(1)
- marked.put(root, true);
- //Struggled a bit with finding out this one. At first glance it seems like the while loop is O(V), and
- //then the for loop is also O(V), making this O(V^2). However, there is a relationship between the
- //number of times the while loop will run and the number of times the for loop will run. If you have a tree
- //where every node (except the first and last) have a degree of 2, then the while loop will run V times, and the
- //for loop will only be O(1). The other case of the root having degree V-1 means that the while loop will be
- //O(1) and the for loop will be O(V).
- //Either way, the solution is to think logically. We only visit each node once, and we check each edge twice
- //This gives us O(2E+V). Since E = V-1 in the case of a tree, we have O(3V) = O(V)
- //O(V)
- while(!verticies.isEmpty()) {
- T vertex = verticies.remove();
- for (T V: g.neighbours(vertex)) {
- if (!marked.get(V) && V != last) {
- edgeTo.put(V, vertex);
- marked.put(V, true);
- verticies.add(V); //O(log 1)
- }
- }
- if (vertex == u) break; //No point in traversing the whole tree if we already found the power outage.
- }
- List<T> path = new ArrayList<T>();
- for (T x = u; !x.equals(root); x = edgeTo.get(x)) path.add(x); //O(V)
- path.add(root);
- return path;
- }
@Override
/**
 * Picks an extra edge to add to the tree. Three phases:
 * (1) BFS from root records each vertex's parent in edgeTo and seeds count;
 * (2) every vertex walks its parent chain to the root, incrementing the count
 *     of each vertex passed, so count.get(v) ends up as (size of v's subtree,
 *     v included) + the seeded 1, while root stays 0;
 * (3) the counts steer one or two descents into the heaviest subtrees, and
 *     the vertices reached become the endpoints of the returned edge.
 *
 * NOTE(review): `vertex != root` below relies on reference identity for the
 * generic type T; this assumes the graph always hands back the same T
 * instances — confirm, or switch to equals().
 * NOTE(review): the first while-loop can spin forever on degenerate input
 * (e.g. a two-vertex tree: root of degree 1 whose only neighbour also has
 * degree 1 leaves no neighbour with a positive count to advance to) — verify
 * the problem's input constraints exclude such trees.
 */
public <T> Edge<T> addRedundant(Graph<T> tree, T root) {
    // Parent pointers of the BFS tree.
    HashMap<T, T> edgeTo = new HashMap<T, T>();
    // count.containsKey doubles as the "visited" marker, saving the O(V)
    // pass that pre-filling a separate marked-map would cost (the asymptotic
    // complexity is unchanged, it just skips some iterations).
    HashMap<T, Integer> count = new HashMap<T, Integer>();
    LinkedList<T> verticies = new LinkedList<T>();
    verticies.add(root);
    count.put(root, 0);
    // Phase 1: BFS. O(V) for a tree.
    while(!verticies.isEmpty()) { //O(V)
        T vertex = verticies.remove();
        for (T V: tree.neighbours(vertex)) {
            if (!count.containsKey(V)) {
                edgeTo.put(V, vertex);
                count.put(V, 1);
                verticies.add(V);
            }}}
    // Phase 2: every vertex bumps the count of each vertex on its path to
    // the root (itself included, root excluded). O(V * depth) worst case.
    for (T V: count.keySet()) {
        for (T x = V; !x.equals(root); x = edgeTo.get(x)) count.put(x, count.get(x)+1);
    }
    T vertex = root;
    // If the root is a leaf (degree < 2): walk downward through vertices of
    // degree < 2, always stepping to a neighbour with a positive count and
    // zeroing that count as we pass.
    while (tree.degree(vertex) < 2) {
        for (T V: tree.neighbours(vertex)) {
            if (count.get(V) > 0) {
                count.put(V, 0);
                vertex = V;
            }}}
    // The walk moved off the root: connect where it stopped back to root.
    if (vertex != root) return new Edge<T>(vertex, root);
    // Otherwise descend twice from the root, each time following the
    // neighbour with the largest remaining count (counts are zeroed along the
    // way so the second descent takes a different branch), and join the two
    // vertices where the descents stop.
    List<T> vertexToJoin = new ArrayList<T>();
    for (int i = 0; i < 2; i++) {
        vertex = root;
        while (tree.degree(vertex) > 1) {
            int max = -1; // declared in the smallest possible scope
            count.put(vertex, 0);
            for (T V: tree.neighbours(vertex)) {
                if (count.get(V) > max) {
                    vertex = V;
                    max = count.get(V);
                }}}
        vertexToJoin.add(vertex);
    }
    return new Edge<T>(vertexToJoin.get(0), vertexToJoin.get(1));
}
- }
Add Comment
Please, Sign In to add comment