Find k closest elements to a given value

Runtime: O(log n + k)

#include <stdio.h>

/* Find the crossover point: the index before which all elements are
   <= x and after which all elements are > x */
int findCrossOver(int arr[], int low, int high, int x)
{
    if (arr[high] <= x)   // x is greater than or equal to all elements
        return high;
    if (arr[low] > x)     // x is smaller than all elements
        return low;

    int mid = low + (high - low)/2;   // avoids overflow of (low + high)
    if (arr[mid] <= x && arr[mid+1] > x)
        return mid;

    if (arr[mid] < x)
        return findCrossOver(arr, mid+1, high, x);
    return findCrossOver(arr, low, mid - 1, x);
}

/* Print the k elements closest to x. Assumes arr[] is sorted. */
void printKclosest(int arr[], int x, int k, int n)
{
    int l = findCrossOver(arr, 0, n-1, x);  // left index to search
    int r = l + 1;                          // right index to search
    int count = 0;

    if (arr[l] == x) l--;  // skip x itself (assumes all elements in arr[] are distinct)

    // Compare the elements on both sides of the crossover point and
    // print the closer one, until k elements are printed
    while (l >= 0 && r < n && count < k)
    {
        if (x - arr[l] < arr[r] - x)
            printf("%d ", arr[l--]);
        else
            printf("%d ", arr[r++]);
        count++;
    }

    // If there are no more elements on the right side, print left side
    while (count < k && l >= 0)
        printf("%d ", arr[l--]), count++;

    // If there are no more elements on the left side, print right side
    while (count < k && r < n)
        printf("%d ", arr[r++]), count++;
}
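
A minimal driver sketch (the values are illustrative), assuming the two functions above and a sorted array of distinct elements:

int main(void)
{
    int arr[] = {12, 16, 22, 30, 35, 39, 42, 45, 48, 50, 53, 55, 56};
    int n = sizeof(arr)/sizeof(arr[0]);
    printKclosest(arr, 35, 4, n);  // prints: 39 30 42 45
    return 0;
}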

Sort n numbers in range from 0 to n^2 - 1 in O(n)

Radix Sort takes O(d*(n+b)) time, where b is the base used to represent the numbers and d is the number of digits.
Since n^2 - 1 is the maximum possible value, d would be O(log_b(n)).

So the overall time complexity is O((n + b) * log_b(n)).
If we set b to n, log_b(n) becomes O(1), and the overall time complexity becomes O(n).

// A utility function to do counting sort of arr[] according to
// the digit represented by exp (digits are taken in base n)
void countSort(int arr[], int n, int exp)
{
    int output[n];   // output array
    int count[n];    // one bucket per base-n digit value
    int i;

    for (i = 0; i < n; i++)
        count[i] = 0;

    // Store the count of occurrences of each digit
    for (i = 0; i < n; i++)
        count[(arr[i]/exp) % n]++;

    // Change count[i] so that it contains the position of
    // this digit in output[]
    for (i = 1; i < n; i++)
        count[i] += count[i - 1];

    // Build the output array (traversing backwards keeps the sort stable)
    for (i = n - 1; i >= 0; i--)
        output[--count[(arr[i]/exp) % n]] = arr[i];

    // Copy the output array back to arr[]
    for (i = 0; i < n; i++)
        arr[i] = output[i];
}

// Sorts arr[] of size n, where the elements are in range 0 to n^2 - 1
void sort(int arr[], int n)
{
    // Do counting sort for the first digit in base n.
    // exp = 1 (n^0) is passed.
    countSort(arr, n, 1);

    // Do counting sort for the second digit in base n.
    // exp = n (n^1) is passed.
    countSort(arr, n, n);
}

How to sort if the range is from 1 to n^2?
1) Subtract 1 from all numbers.
2) Since the range is now 0 to n^2 - 1, do counting sort twice, as in the above implementation (see the wrapper sketch below).
3) After the elements are sorted, add 1 to all numbers to obtain the original numbers.
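
A minimal sketch of that wrapper, reusing the sort() function above (the name sort1ToN2 is illustrative):

// Hypothetical wrapper: sorts n numbers in the range 1 to n^2
void sort1ToN2(int arr[], int n)
{
    for (int i = 0; i < n; i++) arr[i] -= 1;  // shift range to 0 .. n^2 - 1
    sort(arr, n);                             // two-pass base-n radix sort above
    for (int i = 0; i < n; i++) arr[i] += 1;  // restore the original values
}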

How to sort if the range is from 0 to n^3 - 1?
Since there can be 3 digits in base n, we need to call counting sort 3 times, as sketched below.
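
A minimal sketch, assuming the countSort() above (the name sortUpToN3 is illustrative):

// Hypothetical three-pass version: sorts n numbers in the range 0 to n^3 - 1
void sortUpToN3(int arr[], int n)
{
    countSort(arr, n, 1);    // least significant base-n digit (exp = n^0)
    countSort(arr, n, n);    // middle digit (exp = n^1)
    countSort(arr, n, n*n);  // most significant digit (exp = n^2)
}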

Search in an almost sorted array

The array is sorted, but individual elements may have been moved to an adjacent position (i.e., the element that belongs at index i may be at i-1, i, or i+1). We can still search in O(log n): compare the key with the middle element and its two neighbours, then recurse past them.

int binarySearch(int arr[], int l, int r, int x)
{
    if (r >= l)
    {
        int mid = l + (r - l)/2;

        // If the element is present at one of the middle 3 positions
        if (arr[mid] == x) return mid;
        if (mid > l && arr[mid-1] == x) return (mid - 1);
        if (mid < r && arr[mid+1] == x) return (mid + 1);

        // If smaller, x can only be in the left subarray;
        // mid-1 has already been checked, so exclude it too
        if (arr[mid] > x) return binarySearch(arr, l, mid-2, x);

        // Otherwise x can only be in the right subarray
        return binarySearch(arr, mid+2, r, x);
    }
    return -1;  // element not present
}
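
A small usage sketch (hypothetical values): in the array below, every element is at most one position away from its sorted location.

#include <stdio.h>

int main(void)
{
    int arr[] = {10, 3, 40, 20, 50, 80, 70};  // almost sorted: adjacent swaps only
    int n = sizeof(arr)/sizeof(arr[0]);
    int result = binarySearch(arr, 0, n-1, 40);
    if (result == -1) printf("Element not present\n");
    else              printf("Element found at index %d\n", result);  // index 2
    return 0;
}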

When does the worst case of Quicksort occur?

The answer depends on the strategy for choosing the pivot. In early versions of Quick Sort, where the leftmost (or rightmost) element was chosen as pivot, the worst case occurs in the following cases:

1) The array is already sorted.
2) The array is already sorted in reverse order.
3) All elements are the same (a special case of cases 1 and 2).

Since these cases are very common, the problem is usually addressed by choosing a random index for the pivot, choosing the middle index of the partition, or (especially for longer partitions) choosing the median of the first, middle and last elements of the partition as the pivot (see the sketch below). With these modifications, the worst case of Quick Sort is less likely to occur, but it can still occur if the input is such that the maximum (or minimum) element is always chosen as pivot.
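
A minimal median-of-three sketch (the name medianOfThree is illustrative); it returns the index of the median of the first, middle and last elements, which a partition routine could then swap into the pivot position:

// Hypothetical helper: returns the index of the median of
// arr[l], arr[m] and arr[h]
int medianOfThree(int arr[], int l, int h)
{
    int m = l + (h - l)/2;
    if ((arr[l] <= arr[m] && arr[m] <= arr[h]) ||
        (arr[h] <= arr[m] && arr[m] <= arr[l]))
        return m;   // arr[m] lies between the other two
    if ((arr[m] <= arr[l] && arr[l] <= arr[h]) ||
        (arr[h] <= arr[l] && arr[l] <= arr[m]))
        return l;   // arr[l] lies between the other two
    return h;
}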

Shell Sort

ShellSort is mainly a variation of Insertion Sort. In insertion sort, we move elements only one position at a time; when an element has to be moved far ahead, many movements are involved. The idea of ShellSort is to allow exchanges of far-apart elements: sort with a large gap between compared elements, then keep reducing the gap until it becomes 1.


void shellSort(int arr[], int n)
{
    // Start with a big gap, then reduce the gap
    for (int gap = n/2; gap > 0; gap /= 2)
    {
        // Do a gapped insertion sort for this gap size
        for (int i = gap; i < n; i++)
        {
            // Shift earlier gap-sorted elements up until the correct
            // location for arr[i] is found
            int temp = arr[i];
            int j;
            for (j = i; j >= gap && arr[j - gap] > temp; j -= gap)
                arr[j] = arr[j - gap];

            // Put temp (the original arr[i]) in its correct location
            arr[j] = temp;
        }
    }
}
Some sorting algorithms are stable by nature, such as Insertion Sort, Merge Sort and Bubble Sort, while others, such as Heap Sort and Quick Sort, are not. A stable sort preserves the relative order of records with equal keys, as the sketch below illustrates.
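
A minimal sketch with hypothetical (key, value) records: a stable insertion sort keeps the two records with key 2 in their original relative order.

#include <stdio.h>

struct record { int key; char value; };

// Insertion sort on key; stable because records with equal keys
// are never moved past each other (note the strict '>')
void insertionSortRecords(struct record a[], int n)
{
    for (int i = 1; i < n; i++)
    {
        struct record r = a[i];
        int j = i - 1;
        while (j >= 0 && a[j].key > r.key)
        {
            a[j + 1] = a[j];
            j--;
        }
        a[j + 1] = r;
    }
}

int main(void)
{
    struct record a[] = {{2,'a'}, {1,'b'}, {2,'c'}};
    insertionSortRecords(a, 3);
    for (int i = 0; i < 3; i++)
        printf("(%d,%c) ", a[i].key, a[i].value);  // prints (1,b) (2,a) (2,c)
    return 0;
}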

Radix sort

The idea of Radix Sort is to do a digit-by-digit sort, starting from the least significant digit and moving to the most significant digit. For example, {170, 45, 75, 90} is sorted in three passes: one on the units digit, one on the tens digit and one on the hundreds digit.
Let there be d digits in the input integers. Radix Sort takes O(d*(n+b)) time, where b is the base used to represent the numbers; for the decimal system, b is 10.
If k is the maximum possible value, then d would be O(log_b(k)), so the overall time complexity is O((n + b) * log_b(k)).
If we set b to n, we get a time complexity of O(n). In other words, we can sort an array of integers with range from 1 to n^c in linear time (for fixed c) if the numbers are represented in base n (i.e., every digit takes log2(n) bits).

#define BASE 10   // base used to represent the numbers

void radixsort(int *a, int n)
{
  int i, b[n], m = a[0], exp = 1;

  // Find the greatest value
  for (i = 1; i < n; i++)
  {
    if (a[i] > m)
      m = a[i];
  }

  // Loop until exp is bigger than the largest number
  while (m / exp > 0)
  {
    int bucket[BASE] = { 0 };

    // Count the occurrences of each digit
    for (i = 0; i < n; i++)
      bucket[(a[i] / exp) % BASE]++;

    // Add the count of the previous buckets to acquire the index
    // after the end of each bucket location in the array
    for (i = 1; i < BASE; i++)
      bucket[i] += bucket[i - 1];

    // Build the output array, traversing backwards for stability
    for (i = n - 1; i >= 0; i--)
      b[--bucket[(a[i] / exp) % BASE]] = a[i];

    // Copy the output back into a[]
    for (i = 0; i < n; i++)
      a[i] = b[i];

    // Multiply exp by the BASE to get the next group of digits
    exp *= BASE;
  }
}
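
A small usage sketch with hypothetical values:

#include <stdio.h>

int main(void)
{
    int a[] = {170, 45, 75, 90, 802, 24, 2, 66};
    int n = sizeof(a)/sizeof(a[0]);
    radixsort(a, n);
    for (int i = 0; i < n; i++)
        printf("%d ", a[i]);  // prints 2 24 45 66 75 90 170 802
    return 0;
}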

How is the complexity of bucket sort O(n+k) if we implement buckets using linked lists?

Right now, you are correct that iterating across the input array to distribute the elements into buckets takes time O(n). 
However, you are slightly off when you say that the total amount of time required to assemble the array is O(k * (average number of elements per bucket)). 
Note that because there are n elements and k buckets, this would come out to O(k * (n / k)) = O(n), for a total runtime of O(n). 

The problem is that it is not true that k * O(n / k) = O(n). The reason is that the term O(n / k) is hiding a constant factor.
When you visit each bucket and take a look at the elements it contains, it doesn't take exactly n / k time, or even some constant multiple of n / k time. 

For example, what happens if the bucket is empty? In that case, you're still spending some amount of time looking at the bucket, 
since you have to determine that you shouldn't iterate over its elements. 

Thus a more accurate representation of the time required per bucket is something like c0(n / k) + c1, where c0 and c1 are implementation-specific constants. 
This expression is, of course, O(n / k).

The catch is what happens when you multiply this expression by k to get the total amount of work done. If you compute

k * (c0(n / k) + c1) = c0*n + c1*k

you can see that this expression depends directly on k, so the total runtime is O(n + k).

Bucket Sort

Bucket sort is mainly useful when input is uniformly distributed over a range.

/* Note: this routine is effectively a counting sort; it assumes
   all values in X[] lie in the range 0..n. */
void Bucket_Sort(int X[], int n)
{
    int i, j, Y[n+1];
    for (i = 0; i <= n; i++) Y[i] = 0;   // clear the counts
    for (i = 0; i < n; i++) Y[X[i]]++;   // count each value
    for (i = 0, j = 0; i <= n; i++)      // write the values back in order
        while (Y[i]-- > 0) X[j++] = i;
}

Runtime: O(n)
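
The routine above works when the keys are small integers. For input uniformly distributed over a range, bucket sort proper scatters the values into k buckets, sorts each bucket, and concatenates the results. A minimal sketch, assuming uniformly distributed floats in [0, 1); the bucket count, per-bucket capacity and names are illustrative:

#include <string.h>

#define NBUCKETS 10   // illustrative bucket count

// Hypothetical bucket sort for floats in [0, 1); assumes no bucket
// receives more than 64 values
void bucketSort(float a[], int n)
{
    float buckets[NBUCKETS][64];
    int   sizes[NBUCKETS];
    memset(sizes, 0, sizeof(sizes));

    // Scatter: a value v in [0, 1) goes to bucket floor(v * NBUCKETS)
    for (int i = 0; i < n; i++)
    {
        int b = (int)(a[i] * NBUCKETS);
        buckets[b][sizes[b]++] = a[i];
    }

    // Sort each bucket with insertion sort, then gather in order
    int idx = 0;
    for (int b = 0; b < NBUCKETS; b++)
    {
        for (int i = 1; i < sizes[b]; i++)
        {
            float v = buckets[b][i];
            int j = i - 1;
            while (j >= 0 && buckets[b][j] > v)
            {
                buckets[b][j + 1] = buckets[b][j];
                j--;
            }
            buckets[b][j + 1] = v;
        }
        for (int i = 0; i < sizes[b]; i++)
            a[idx++] = buckets[b][i];
    }
}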

Quick Sort using Linked list

struct node
{
    int data;
    struct node *link;
    struct node *plink;
};
struct node *first=NULL, *last=NULL;
// Swap the data fields of two nodes (the nodes themselves stay in place)
void swap(struct node* a, struct node* b)
{
    int temp = a->data;
    a->data = b->data;
    b->data = temp;
}
 
void qsort(struct node *low, struct node *high)
{
    // Stop on an empty or single-node range, or when the
    // pointers have crossed (high precedes low)
    if (low == NULL || high == NULL || low == high || low->plink == high)
        return;

    // Partition around low's data: nodes with smaller values are
    // swapped toward the front of the sublist
    struct node *pivot = low, *tmp = low->link;
    while (tmp != high->link)
    {
        if (tmp->data < low->data)
        {
            swap(pivot->link, tmp);
            pivot = pivot->link;
        }
        tmp = tmp->link;
    }
    swap(low, pivot);  // place the pivot value in its final position
    qsort(low, pivot->plink);
    qsort(pivot->link, high);
}
Quicksort can be implemented for linked lists only when we can pick a fixed node (such as the first or last) as the pivot. Randomized QuickSort cannot be implemented efficiently for linked lists, because reaching a random pivot requires an O(n) traversal.

Quick Sort using Double Linked list
struct node
{
    int data;
    struct node *next;
    struct node *prev;
};

// Swap the data fields of two nodes (the nodes themselves stay in place)
void swap(struct node* a, struct node* b)
{
    int temp = a->data;
    a->data = b->data;
    b->data = temp;
}
 
void qsort(struct node *low, struct node *high)
{
    // Stop on an empty or single-node range, or when the
    // pointers have crossed (high precedes low)
    if (low == NULL || high == NULL || low == high || low->prev == high)
        return;

    struct node *pivot = low, *tmp = low->next;
    while (tmp != high->next)
    {
        if (tmp->data < low->data)
        {
            swap(pivot->next, tmp);
            pivot = pivot->next;
        }
        tmp = tmp->next;
    }
    swap(low, pivot);
    qsort(low, pivot->prev);
    qsort(pivot->next, high);
}
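
A small driver sketch for the doubly linked version; the node setup and values are illustrative (the nodes are laid out in an array, so no allocation helper is needed):

#include <stdio.h>

int main(void)
{
    struct node nodes[5];
    int vals[] = {30, 3, 4, 20, 5};

    // Link the nodes into a list: nodes[0] <-> nodes[1] <-> ... <-> nodes[4]
    for (int i = 0; i < 5; i++)
    {
        nodes[i].data = vals[i];
        nodes[i].prev = (i > 0) ? &nodes[i-1] : NULL;
        nodes[i].next = (i < 4) ? &nodes[i+1] : NULL;
    }

    qsort(&nodes[0], &nodes[4]);  // the list version above, not stdlib's qsort

    for (struct node *p = &nodes[0]; p != NULL; p = p->next)
        printf("%d ", p->data);   // prints 3 4 5 20 30
    return 0;
}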

Quick Sort

void swap(int* a, int* b)
{
    int t = *a;
    *a = *b;
    *b = t;
}

void QuickSort(int *array, int l, int h)
{
    if(l >= h) return;
    int pivot = array[l];  /* Pivot is the starting element */

    int i = l;
    for(int j = l + 1; j <= h; j++)
    {
        if(array[j] < pivot)
        {
            i++;
            swap(&array[i], &array[j]);
        }
    }
    /*
     _______________________i____________________
      |pivot|....<pivot.......|....>=pivot......|
    */

    swap(&array[i], &array[l]);
    /*
      ____________________i______________________
      |...<pivot.......|pivot|....>=pivot......|
    */

    QuickSort(array, l, i-1);
    QuickSort(array, i+1, h);
}

Although the worst-case time complexity of QuickSort is O(n^2), which is worse than that of many other sorting algorithms such as Merge Sort and Heap Sort, QuickSort is faster in practice: its inner loop can be implemented efficiently on most architectures and for most real-world data, and its sequential, localized memory references work well with caches.

Best Case and Average Case: O(n log n)

QUICK SORT ITERATIVE

// Lomuto partition: places arr[h] (the pivot) at its correct
// sorted position and returns that index
int partition (int arr[], int l, int h)
{
    int x = arr[h];
    int i = l - 1;

    for (int j = l; j < h; j++)
    {
        if (arr[j] <= x)
        {
            i++;
            swap (&arr[i], &arr[j]);
        }
    }
    swap (&arr[i + 1], &arr[h]);
    return (i + 1);
}

void quickSortIterative (int arr[], int l, int h)
{
    // Create an auxiliary stack of subarray bounds
    int stack[ h - l + 1 ];
    int top = -1;

    // Push the initial bounds
    stack[ ++top ] = l;
    stack[ ++top ] = h;

    // Keep popping while the stack is not empty
    while ( top >= 0 )
    {
        h = stack[ top-- ];
        l = stack[ top-- ];

        // Set the pivot element at its correct position
        int p = partition( arr, l, h );

        // If there are elements on the left side of the pivot,
        // push that side's bounds
        if ( p-1 > l ) {
            stack[ ++top ] = l;
            stack[ ++top ] = p - 1;
        }

        // If there are elements on the right side of the pivot,
        // push that side's bounds
        if ( p+1 < h ){
            stack[ ++top ] = p + 1;
            stack[ ++top ] = h;
        }
    }
}
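
A small usage sketch with hypothetical values:

#include <stdio.h>

int main(void)
{
    int arr[] = {4, 3, 5, 2, 1, 3, 2, 3};
    int n = sizeof(arr)/sizeof(arr[0]);
    quickSortIterative(arr, 0, n - 1);
    for (int i = 0; i < n; i++)
        printf("%d ", arr[i]);  // prints 1 2 2 3 3 3 4 5
    return 0;
}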