API URL 数据无法获取,状态码 401

huangapple go评论139阅读模式
英文:

API URL data not able to fetch, status code 401

问题

我正在尝试从API URL获取数据到Excel文件中,nseindia api,这只是Nifty指数的OHLC值。

我已经创建了用于日期输入的变量。

我遇到了以下错误:

> 无法从URL获取数据:状态码:401

  1. import requests
  2. import pandas as pd
  3. import xlwings as xw
  4. def NseHistoricalData(index_type, from_date, to_date):
  5. url = f"https://www.nseindia.com/api/historical/indicesHistory?indexType={index_type}&from={from_date}&to={to_date}"
  6. headers = {
  7. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
  8. 'Referer': 'https://www.nseindia.com/',
  9. 'Accept-Language': 'en-US,en;q=0.9',
  10. 'Accept-Encoding': 'gzip, deflate, br'}
  11. session = requests.Session()
  12. response_session = session.get(url, headers=headers)
  13. try:
  14. if response_session.status_code == 200:
  15. data = response_session.json()['data']['indexCloseOnlineRecords']
  16. if data:
  17. # 提取所需的值
  18. eod_open_index_val = data[0]['EOD_OPEN_INDEX_VAL']
  19. eod_high_index_val = data[0]['EOD_HIGH_INDEX_VAL']
  20. eod_close_index_val = data[0]['EOD_CLOSE_INDEX_VAL']
  21. eod_low_index_val = data[0]['EOD_LOW_INDEX_VAL']
  22. # 打开Excel文件
  23. file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
  24. wb = xw.Book(file_name)
  25. sheet_name = "Main"
  26. # 将提取的值写入工作表
  27. wb.sheets[sheet_name].range('M8').value = eod_open_index_val
  28. wb.sheets[sheet_name].range('M9').value = eod_high_index_val
  29. wb.sheets[sheet_name].range('M10').value = eod_close_index_val
  30. wb.sheets[sheet_name].range('M11').value = eod_low_index_val
  31. # 保存对Excel文件的更改
  32. wb.save(file_name)
  33. print("已成功将历史数据写入工作表")
  34. else:
  35. print("未找到指定日期范围的数据")
  36. else:
  37. print(f"无法从URL获取数据状态码{response_session.status_code}")
  38. except Exception as e:
  39. print(f"发生错误{str(e)}")
  40. # 从Excel表格中读取日期范围
  41. file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
  42. wb = xw.Book(file_name)
  43. sheet_name = "Main"
  44. from_date = wb.sheets[sheet_name].range('C15').value
  45. to_date = wb.sheets[sheet_name].range('C16').value
  46. # 指定历史数据的指数类型(例如,NIFTY 50)
  47. index_type = "NIFTY%2050"
  48. # 调用函数以获取并写入历史数据
  49. NseHistoricalData(index_type, from_date, to_date)
英文:

I am trying to fetch data from the API URL into an Excel file:
[https://www.nseindia.com/api/historical/indicesHistory?indexType=NIFTY%2050&from=28-07-2023&to=28-07-2023][nseindia api]
It is just the OHLC values for the NIFTY index.

I have created variables for the date inputs.
I am getting this error:

> Failed to fetch data from the URL: Status Code: 401

  1. import requests
  2. import pandas as pd
  3. import xlwings as xw
  4. def NseHistoricalData(index_type, from_date, to_date):
  5. url = f"https://www.nseindia.com/api/historical/indicesHistory?indexType={index_type}&from={from_date}&to={to_date}"
  6. headers = {
  7. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
  8. 'Referer': 'https://www.nseindia.com/',
  9. 'Accept-Language': 'en-US,en;q=0.9',
  10. 'Accept-Encoding': 'gzip, deflate, br'}
  11. session = requests.Session()
  12. response_session = session.get(url, headers=headers)
  13. try:
  14. if response_session.status_code == 200:
  15. data = response_session.json()['data']['indexCloseOnlineRecords']
  16. if data:
  17. # Extract the required values
  18. eod_open_index_val = data[0]['EOD_OPEN_INDEX_VAL']
  19. eod_high_index_val = data[0]['EOD_HIGH_INDEX_VAL']
  20. eod_close_index_val = data[0]['EOD_CLOSE_INDEX_VAL']
  21. eod_low_index_val = data[0]['EOD_LOW_INDEX_VAL']
  22. # Open the Excel file
  23. file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
  24. wb = xw.Book(file_name)
  25. sheet_name = "Main"
  26. # Write the extracted values to the sheet
  27. wb.sheets[sheet_name].range('M8').value = eod_open_index_val
  28. wb.sheets[sheet_name].range('M9').value = eod_high_index_val
  29. wb.sheets[sheet_name].range('M10').value = eod_close_index_val
  30. wb.sheets[sheet_name].range('M11').value = eod_low_index_val
  31. # Save the changes to the Excel file
  32. wb.save(file_name)
  33. print("Historical data has been written to the sheet successfully.")
  34. else:
  35. print("No data found for the specified date range.")
  36. else:
  37. print(f"Failed to fetch data from the URL. Status Code: {response_session.status_code}")
  38. except Exception as e:
  39. print(f"An error occurred: {str(e)}")
  40. # Read the date range from the Excel sheet
  41. file_name = r"C:\Users\vhind\Desktop\a\Zerodha\Version_v0\Zerodha\NSE_data.xlsm"
  42. wb = xw.Book(file_name)
  43. sheet_name = "Main"
  44. from_date = wb.sheets[sheet_name].range('C15').value
  45. to_date = wb.sheets[sheet_name].range('C16').value
  46. # Specify the index type for historical data (e.g., NIFTY 50)
  47. index_type = "NIFTY%2050"
  48. # Call the function to fetch and write the historical data
  49. NseHistoricalData(index_type, from_date, to_date)

答案1

得分: 1

尝试在发送请求时使用API密钥。您可以从 https://scrapeops.io/app/register/proxy 创建一个免费帐户。

  1. import urllib
  2. def get_scrapeops_url(url):
  3. API_KEY = 'YOUR_API_KEY'
  4. payload = {'api_key': API_KEY, 'url': url}
  5. proxy_url = 'https://proxy.scrapeops.io/v1/?' + urllib.parse.urlencode(payload)
  6. return proxy_url
  7. def NseHistoricalData(index_type, from_date, to_date):
  8. # url and headers
  9. session = requests.Session()
  10. response_session = requests.get(get_scrapeops_url(url), headers=headers)
  11. # rest of the code
英文:

Try using an API Key when you make your request. You can create a free account from https://scrapeops.io/app/register/proxy

  1. import urllib
  2. def get_scrapeops_url(url):
  3. API_KEY = 'YOUR_API_KEY'
  4. payload = {'api_key': API_KEY, 'url': url}
  5. proxy_url = 'https://proxy.scrapeops.io/v1/?' + urllib.parse.urlencode(payload)
  6. return proxy_url
  7. def NseHistoricalData(index_type, from_date, to_date):
  8. # url and headers
  9. session = requests.Session()
  10. response_session = requests.get(get_scrapeops_url(url), headers=headers)
  11. # rest of the code

huangapple
  • 本文由 发表于 2023年7月31日 20:57:39
  • 转载请务必保留本文链接:https://go.coder-hub.com/76803865.html
匿名

发表评论

匿名网友

:?: :razz: :sad: :evil: :!: :smile: :oops: :grin: :eek: :shock: :???: :cool: :lol: :mad: :twisted: :roll: :wink: :idea: :arrow: :neutral: :cry: :mrgreen:

确定