Could someone tell me how to get the weekly mobility (or social distancing) data at the state level?

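For context, this is roughly what I have in mind, assuming the daily Social Distancing Metrics files with an `origin_census_block_group` column (whose first two digits give the state FIPS), a `date_range_start` column, and device counts such as `completely_home_device_count`; the file path below is just an example:

```
import pandas as pd

# Rough sketch: aggregate one day's CBG-level social distancing metrics to the
# state level; the weekly numbers would come from concatenating the daily files
# first and grouping by week. Column names are my reading of the schema.
df = pd.read_csv(
    "2020/04/01/2020-04-01-social-distancing.csv.gz",   # example path
    usecols=["origin_census_block_group", "date_range_start",
             "device_count", "completely_home_device_count"],
    dtype={"origin_census_block_group": str},
)

# First two digits of the 12-digit census block group FIPS are the state FIPS.
df["state_fips"] = df["origin_census_block_group"].str[:2]
df["week"] = pd.to_datetime(df["date_range_start"].str[:10]).dt.to_period("W")

weekly_state = (
    df.groupby(["state_fips", "week"])[
        ["device_count", "completely_home_device_count"]
    ]
    .sum()
    .reset_index()
)
```

Does grouping on the first two digits of the CBG FIPS like this sound right, or is there a ready-made state-level weekly file?
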
@Ryan_Fox_Squire_SafeGraph Sorry to bother you again. I am using the same code to combine the v2 data, but it fails with the error message below:

```
ParserError                               Traceback (most recent call last)
<ipython-input-7-e2172f821831> in <module>
----> 1 df = pd.concat([pd.read_csv(f) for f in all_files])

<ipython-input-7-e2172f821831> in <listcomp>(.0)
----> 1 df = pd.concat([pd.read_csv(f) for f in all_files])

C:\Anaconda3\lib\site-packages\pandas\io\parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, skipfooter, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, cache_dates, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, doublequote, escapechar, comment, encoding, dialect, error_bad_lines, warn_bad_lines, delim_whitespace, low_memory, memory_map, float_precision)
    674         )
    675 
--> 676         return _read(filepath_or_buffer, kwds)
    677 
    678     parser_f.__name__ = name

C:\Anaconda3\lib\site-packages\pandas\io\parsers.py in _read(filepath_or_buffer, kwds)
    452 
    453     try:
--> 454         data = parser.read(nrows)
    455     finally:
    456         parser.close()

C:\Anaconda3\lib\site-packages\pandas\io\parsers.py in read(self, nrows)
   1131     def read(self, nrows=None):
   1132         nrows = _validate_integer("nrows", nrows)
-> 1133         ret = self._engine.read(nrows)
   1134 
   1135         # May alter columns / col_dict

C:\Anaconda3\lib\site-packages\pandas\io\parsers.py in read(self, nrows)
   2035     def read(self, nrows=None):
   2036         try:
-> 2037             data = self._reader.read(nrows)
   2038         except StopIteration:
   2039             if self._first_chunk:

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader.read()

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._read_low_memory()

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._read_rows()

pandas\_libs\parsers.pyx in pandas._libs.parsers.TextReader._tokenize_rows()

pandas\_libs\parsers.pyx in pandas._libs.parsers.raise_parser_error()

ParserError: Error tokenizing data. C error: out of memory
```
This is how I build the list of files to read:

```
import os

# walk dir_to_read/<month>/<day>/ and collect every daily file
all_files = []
for month in os.listdir(dir_to_read):
  month_path = os.path.join(dir_to_read,month)
  for day in os.listdir(month_path):
    day_path = os.path.join(month_path,day)
    for file in os.listdir(day_path):
      all_files.append(os.path.join(day_path, file))
```
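Separately, in case it is relevant to the out-of-memory error above: would reading only the columns I actually need (and giving the CBG ID an explicit string dtype) shrink things enough for the concat to go through? A rough sketch of what I mean, with an assumed column subset:

```
import pandas as pd

# Sketch: combine the daily files while parsing only the columns of interest
# to keep memory down. The usecols list is just my guess at what I need;
# all_files is the list built by the loop above.
cols = ["origin_census_block_group", "date_range_start",
        "device_count", "completely_home_device_count"]

frames = [
    pd.read_csv(path, usecols=cols,
                dtype={"origin_census_block_group": str})
    for path in all_files
]
df = pd.concat(frames, ignore_index=True)
```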