#!/bin/bash

_filename=$1
_outputfile="frobnicated"

# For each file:
# get a list of non-unique ids
_non_unique_ids=$(cut -d',' -f1 "${_filename}" | sort | uniq -d)

# If there are no duplicate ids at all, the empty pattern below would
# match every line, so just copy the file through and stop.
if [[ -z "${_non_unique_ids}" ]] ; then
    cp "${_filename}" "${_outputfile}"
    exit 0
fi

# next, split your file into 2 piles
# use grep -v -F here. -F lets you use your non-unique ids as a single big filter.
# This works even for monstrous files.
_unique_entries=$(grep -v -F "${_non_unique_ids}" "${_filename}")
_dupe_entries=$(grep -F "${_non_unique_ids}" "${_filename}")

# dump your unique entries to a file
echo "$_unique_entries" > "$_outputfile"
# just for testing, so you can see for yourself
echo "###### below were dupes" >> "$_outputfile"

# pick only the first entry that has a dupe ID
for _dupe in $_non_unique_ids ; do
    # if you want to use something other than just grabbing the first dupe
    # entry, you'll have to put it here.
    _match=$(grep "${_dupe}" <<< "${_dupe_entries}" | head -n1)
    if [[ -n "${_match}" ]] ; then
        echo "${_match}" >> "$_outputfile"
    fi
done
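
# For reference, a minimal way to try the script. The filenames input.csv and
# dedupe.sh are hypothetical (the paste is untitled); the output file
# "frobnicated" comes from the script itself.

$ cat input.csv
1,alice
2,bob
1,alice-dupe
3,carol

$ bash dedupe.sh input.csv
$ cat frobnicated
2,bob
3,carol
###### below were dupes
1,alice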