#!/usr/bin/env python3
# -*- coding: utf-8 -*-
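"""Detect duplicate publication ids in a '#'-delimited CSV file.

Example invocation (the file names below are hypothetical):

    python3 findDuplicates.py publications.csv

The input file is expected to start with a header row; only the first column
(the publication id) of each following row is read, the other columns are
ignored.
"""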

import os
import sys
import csv

## directory from which the script is launched
mypath = os.getcwd()
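# Minimal guard (a sketch): the CSV path is expected as the first command-line
# argument, so exit with a usage message instead of an IndexError when it is missing.
if len(sys.argv) < 2:
  sys.exit("Usage: {} <publications_csv>".format(sys.argv[0]))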
mypath_input_publications = sys.argv[1]

def findDuplicates():
  publications_map = []

  ##############################
  # Parse the CSV file         #
  ##############################
  with open(mypath_input_publications) as csvfile:
    filereader = csv.reader(csvfile, delimiter='#')
    for i, row in enumerate(filereader):
      if i == 0:
        # The first row is the header: print its first column as a sanity check.
        print("First line (and first column):", row[0])
      else:
        # The publication id is the first column; strip all whitespace so that
        # ids differing only by spacing compare as equal.
        id_publi = "".join(row[0].split())
        if id_publi in publications_map:
          print("\t\t FIND DUPLICATE [", id_publi, "]")
        else:
          publications_map.append(id_publi)
  # Duplicates are only printed; the returned list holds the unique ids.
  return publications_map


if __name__ == "__main__":
  findDuplicates()

